2024-11-15 09:34:14,213 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-15 09:34:14,225 main DEBUG Took 0.010002 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-15 09:34:14,226 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-15 09:34:14,226 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-15 09:34:14,227 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-15 09:34:14,228 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 09:34:14,235 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-15 09:34:14,247 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 09:34:14,248 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 09:34:14,249 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 09:34:14,249 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 09:34:14,250 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 09:34:14,250 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 09:34:14,251 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 09:34:14,251 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 09:34:14,252 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 09:34:14,252 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 09:34:14,253 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 09:34:14,253 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 09:34:14,254 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 09:34:14,254 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-15 09:34:14,254 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 09:34:14,255 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 09:34:14,255 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 09:34:14,256 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 09:34:14,256 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 09:34:14,256 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 09:34:14,257 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 09:34:14,257 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 09:34:14,258 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 09:34:14,258 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 09:34:14,258 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 09:34:14,258 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-15 09:34:14,260 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 09:34:14,261 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-15 09:34:14,263 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-15 09:34:14,264 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-15 09:34:14,265 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-15 09:34:14,265 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-15 09:34:14,273 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-15 09:34:14,275 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-15 09:34:14,277 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-15 09:34:14,277 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-15 09:34:14,278 main DEBUG createAppenders(={Console}) 2024-11-15 09:34:14,279 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-15 09:34:14,279 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-15 09:34:14,279 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-15 09:34:14,280 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-15 09:34:14,280 main DEBUG OutputStream closed 2024-11-15 09:34:14,280 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-15 09:34:14,281 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-15 09:34:14,281 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-15 09:34:14,355 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-15 09:34:14,357 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-15 09:34:14,358 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-15 09:34:14,359 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-15 09:34:14,360 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-15 09:34:14,360 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-15 09:34:14,360 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-15 09:34:14,361 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-15 09:34:14,361 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-15 09:34:14,361 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-15 09:34:14,362 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-15 09:34:14,362 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-15 09:34:14,362 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-15 09:34:14,362 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-15 09:34:14,363 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-15 09:34:14,363 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-15 09:34:14,363 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-15 09:34:14,364 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-15 09:34:14,366 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-15 09:34:14,367 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-15 09:34:14,367 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-15 09:34:14,367 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-15T09:34:14,593 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e 2024-11-15 09:34:14,596 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-15 09:34:14,596 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-15T09:34:14,604 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-15T09:34:14,640 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=208, ProcessCount=11, AvailableMemoryMB=4658 2024-11-15T09:34:14,643 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T09:34:14,658 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/cluster_b837c6ed-002c-851e-d2c4-71797032264d, deleteOnExit=true 2024-11-15T09:34:14,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T09:34:14,659 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/test.cache.data in system properties and HBase conf 2024-11-15T09:34:14,660 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T09:34:14,661 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/hadoop.log.dir in system properties and HBase conf 2024-11-15T09:34:14,661 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T09:34:14,662 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T09:34:14,662 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T09:34:14,747 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-15T09:34:14,848 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-15T09:34:14,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T09:34:14,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T09:34:14,854 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T09:34:14,855 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T09:34:14,855 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T09:34:14,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T09:34:14,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T09:34:14,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T09:34:14,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T09:34:14,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/nfs.dump.dir in system properties and HBase conf 2024-11-15T09:34:14,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/java.io.tmpdir in system properties and HBase conf 2024-11-15T09:34:14,859 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T09:34:14,859 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T09:34:14,860 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T09:34:15,331 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T09:34:15,894 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-15T09:34:15,979 INFO [Time-limited test {}] log.Log(170): Logging initialized @2411ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-15T09:34:16,051 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:34:16,114 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:34:16,134 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:34:16,134 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:34:16,135 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:34:16,146 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:34:16,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:34:16,151 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:34:16,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/java.io.tmpdir/jetty-localhost-33801-hadoop-hdfs-3_4_1-tests_jar-_-any-9895089422622679569/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T09:34:16,333 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:33801} 2024-11-15T09:34:16,333 INFO [Time-limited test {}] server.Server(415): Started @2766ms 2024-11-15T09:34:16,358 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T09:34:17,036 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:34:17,044 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:34:17,045 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:34:17,045 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:34:17,045 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:34:17,046 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:34:17,047 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:34:17,154 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/java.io.tmpdir/jetty-localhost-38327-hadoop-hdfs-3_4_1-tests_jar-_-any-4809306221546907569/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:34:17,155 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:38327} 2024-11-15T09:34:17,155 INFO [Time-limited test {}] server.Server(415): Started @3588ms 2024-11-15T09:34:17,208 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:34:17,321 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:34:17,326 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:34:17,327 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:34:17,327 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:34:17,327 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:34:17,328 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:34:17,329 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:34:17,460 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/java.io.tmpdir/jetty-localhost-45805-hadoop-hdfs-3_4_1-tests_jar-_-any-7195961836612556941/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:34:17,461 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:45805} 2024-11-15T09:34:17,461 INFO [Time-limited test {}] server.Server(415): Started @3894ms 2024-11-15T09:34:17,464 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-15T09:34:18,836 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/cluster_b837c6ed-002c-851e-d2c4-71797032264d/data/data4/current/BP-1421409377-172.17.0.2-1731663255408/current, will proceed with Du for space computation calculation, 2024-11-15T09:34:18,836 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/cluster_b837c6ed-002c-851e-d2c4-71797032264d/data/data2/current/BP-1421409377-172.17.0.2-1731663255408/current, will proceed with Du for space computation calculation, 2024-11-15T09:34:18,836 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/cluster_b837c6ed-002c-851e-d2c4-71797032264d/data/data3/current/BP-1421409377-172.17.0.2-1731663255408/current, will proceed with Du for space computation calculation, 2024-11-15T09:34:18,836 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/cluster_b837c6ed-002c-851e-d2c4-71797032264d/data/data1/current/BP-1421409377-172.17.0.2-1731663255408/current, will proceed with Du for space computation calculation, 2024-11-15T09:34:18,872 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T09:34:18,872 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T09:34:18,919 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x98bf8bab2faca34d with lease ID 0xe46d0992db5ce239: Processing first storage report for DS-d9e1f711-328c-45ec-ba2e-cd2db2435942 from datanode DatanodeRegistration(127.0.0.1:32973, datanodeUuid=4e0e27a7-9048-4e32-970c-0f5f293f638d, infoPort=39345, infoSecurePort=0, ipcPort=40895, storageInfo=lv=-57;cid=testClusterID;nsid=1257839600;c=1731663255408) 2024-11-15T09:34:18,921 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x98bf8bab2faca34d with lease ID 0xe46d0992db5ce239: from storage DS-d9e1f711-328c-45ec-ba2e-cd2db2435942 node DatanodeRegistration(127.0.0.1:32973, datanodeUuid=4e0e27a7-9048-4e32-970c-0f5f293f638d, infoPort=39345, infoSecurePort=0, ipcPort=40895, storageInfo=lv=-57;cid=testClusterID;nsid=1257839600;c=1731663255408), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T09:34:18,921 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x55c1cda6757eee08 with lease ID 0xe46d0992db5ce238: Processing first storage report for DS-9d19dc5e-c159-488e-aa93-2db2b763588f from datanode DatanodeRegistration(127.0.0.1:33343, datanodeUuid=96165d34-4806-4140-bf21-9f241bc57699, infoPort=38201, infoSecurePort=0, ipcPort=32919, storageInfo=lv=-57;cid=testClusterID;nsid=1257839600;c=1731663255408) 2024-11-15T09:34:18,921 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x55c1cda6757eee08 with lease ID 0xe46d0992db5ce238: from storage DS-9d19dc5e-c159-488e-aa93-2db2b763588f node DatanodeRegistration(127.0.0.1:33343, datanodeUuid=96165d34-4806-4140-bf21-9f241bc57699, infoPort=38201, infoSecurePort=0, ipcPort=32919, storageInfo=lv=-57;cid=testClusterID;nsid=1257839600;c=1731663255408), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:34:18,922 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x98bf8bab2faca34d with lease ID 0xe46d0992db5ce239: Processing first storage report for DS-0e5b30d0-79b2-48fb-86fe-942024719383 from datanode DatanodeRegistration(127.0.0.1:32973, datanodeUuid=4e0e27a7-9048-4e32-970c-0f5f293f638d, infoPort=39345, infoSecurePort=0, ipcPort=40895, storageInfo=lv=-57;cid=testClusterID;nsid=1257839600;c=1731663255408) 2024-11-15T09:34:18,922 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x98bf8bab2faca34d with lease ID 0xe46d0992db5ce239: from storage DS-0e5b30d0-79b2-48fb-86fe-942024719383 node DatanodeRegistration(127.0.0.1:32973, datanodeUuid=4e0e27a7-9048-4e32-970c-0f5f293f638d, infoPort=39345, infoSecurePort=0, ipcPort=40895, storageInfo=lv=-57;cid=testClusterID;nsid=1257839600;c=1731663255408), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T09:34:18,923 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x55c1cda6757eee08 with lease ID 0xe46d0992db5ce238: Processing first storage report for DS-a6195dfa-0ded-4b5e-8e17-724d4bcfe19f from datanode DatanodeRegistration(127.0.0.1:33343, datanodeUuid=96165d34-4806-4140-bf21-9f241bc57699, infoPort=38201, infoSecurePort=0, ipcPort=32919, storageInfo=lv=-57;cid=testClusterID;nsid=1257839600;c=1731663255408) 2024-11-15T09:34:18,923 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x55c1cda6757eee08 with lease ID 0xe46d0992db5ce238: from storage DS-a6195dfa-0ded-4b5e-8e17-724d4bcfe19f node DatanodeRegistration(127.0.0.1:33343, datanodeUuid=96165d34-4806-4140-bf21-9f241bc57699, infoPort=38201, infoSecurePort=0, ipcPort=32919, storageInfo=lv=-57;cid=testClusterID;nsid=1257839600;c=1731663255408), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:34:18,992 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e 2024-11-15T09:34:19,057 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/cluster_b837c6ed-002c-851e-d2c4-71797032264d/zookeeper_0, clientPort=56657, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/cluster_b837c6ed-002c-851e-d2c4-71797032264d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/cluster_b837c6ed-002c-851e-d2c4-71797032264d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T09:34:19,067 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56657 2024-11-15T09:34:19,077 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:34:19,080 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:34:19,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741825_1001 (size=7) 2024-11-15T09:34:19,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741825_1001 (size=7) 2024-11-15T09:34:19,689 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7 with version=8 2024-11-15T09:34:19,689 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/hbase-staging 2024-11-15T09:34:19,777 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-15T09:34:20,021 INFO [Time-limited test {}] client.ConnectionUtils(128): master/791f12959b23:0 server-side Connection retries=45 2024-11-15T09:34:20,030 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:34:20,030 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T09:34:20,035 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T09:34:20,035 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:34:20,035 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T09:34:20,177 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T09:34:20,231 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-15T09:34:20,239 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-15T09:34:20,242 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T09:34:20,264 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 102411 (auto-detected) 2024-11-15T09:34:20,265 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-15T09:34:20,283 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37481 2024-11-15T09:34:20,303 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37481 connecting to ZooKeeper ensemble=127.0.0.1:56657 2024-11-15T09:34:20,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:374810x0, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T09:34:20,400 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37481-0x1013dd8614b0000 connected 2024-11-15T09:34:20,537 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:34:20,540 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:34:20,552 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:34:20,555 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7, hbase.cluster.distributed=false 2024-11-15T09:34:20,577 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T09:34:20,582 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37481 2024-11-15T09:34:20,582 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37481 2024-11-15T09:34:20,583 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37481 2024-11-15T09:34:20,584 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37481 2024-11-15T09:34:20,584 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37481 2024-11-15T09:34:20,693 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/791f12959b23:0 server-side Connection retries=45 2024-11-15T09:34:20,695 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:34:20,695 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T09:34:20,695 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T09:34:20,695 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:34:20,695 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T09:34:20,698 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T09:34:20,701 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T09:34:20,702 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39761 2024-11-15T09:34:20,705 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39761 connecting to ZooKeeper ensemble=127.0.0.1:56657 2024-11-15T09:34:20,706 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:34:20,713 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:34:20,742 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:397610x0, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T09:34:20,743 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39761-0x1013dd8614b0001 connected 2024-11-15T09:34:20,743 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:397610x0, 
quorum=127.0.0.1:56657, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:34:20,747 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T09:34:20,755 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T09:34:20,758 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T09:34:20,765 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T09:34:20,766 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39761 2024-11-15T09:34:20,766 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39761 2024-11-15T09:34:20,767 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39761 2024-11-15T09:34:20,768 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39761 2024-11-15T09:34:20,769 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39761 2024-11-15T09:34:20,785 DEBUG [M:0;791f12959b23:37481 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;791f12959b23:37481 2024-11-15T09:34:20,786 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/791f12959b23,37481,1731663259873 2024-11-15T09:34:20,810 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:34:20,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:34:20,812 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/791f12959b23,37481,1731663259873 2024-11-15T09:34:20,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:34:20,836 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T09:34:20,836 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-15T09:34:20,838 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T09:34:20,839 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/791f12959b23,37481,1731663259873 from backup master directory 2024-11-15T09:34:20,851 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:34:20,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/791f12959b23,37481,1731663259873 2024-11-15T09:34:20,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:34:20,852 WARN [master/791f12959b23:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T09:34:20,852 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=791f12959b23,37481,1731663259873 2024-11-15T09:34:20,854 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-15T09:34:20,855 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-15T09:34:20,910 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/hbase.id] with ID: 9cd43f53-8228-4e31-b0a5-8d4391cfb640 2024-11-15T09:34:20,910 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/.tmp/hbase.id 2024-11-15T09:34:20,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741826_1002 (size=42) 2024-11-15T09:34:20,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741826_1002 (size=42) 2024-11-15T09:34:21,333 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/.tmp/hbase.id]:[hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/hbase.id] 2024-11-15T09:34:21,383 INFO [master/791f12959b23:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:34:21,390 INFO [master/791f12959b23:0:becomeActiveMaster {}] 
util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T09:34:21,408 INFO [master/791f12959b23:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-11-15T09:34:21,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:34:21,441 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:34:21,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741827_1003 (size=196) 2024-11-15T09:34:21,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741827_1003 (size=196) 2024-11-15T09:34:21,477 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T09:34:21,479 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T09:34:21,484 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:34:21,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741828_1004 (size=1189) 2024-11-15T09:34:21,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741828_1004 (size=1189) 2024-11-15T09:34:21,527 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY 
=> ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store 2024-11-15T09:34:21,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741829_1005 (size=34) 2024-11-15T09:34:21,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741829_1005 (size=34) 2024-11-15T09:34:21,551 INFO [master/791f12959b23:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-15T09:34:21,555 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:34:21,556 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T09:34:21,556 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:34:21,557 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:34:21,558 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T09:34:21,558 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:34:21,559 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T09:34:21,560 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731663261556Disabling compacts and flushes for region at 1731663261556Disabling writes for close at 1731663261558 (+2 ms)Writing region close event to WAL at 1731663261558Closed at 1731663261558 2024-11-15T09:34:21,562 WARN [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/.initializing 2024-11-15T09:34:21,562 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/WALs/791f12959b23,37481,1731663259873 2024-11-15T09:34:21,585 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C37481%2C1731663259873, suffix=, logDir=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/WALs/791f12959b23,37481,1731663259873, archiveDir=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/oldWALs, maxLogs=10 2024-11-15T09:34:21,595 INFO [master/791f12959b23:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C37481%2C1731663259873.1731663261591 2024-11-15T09:34:21,617 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/WALs/791f12959b23,37481,1731663259873/791f12959b23%2C37481%2C1731663259873.1731663261591 2024-11-15T09:34:21,624 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38201:38201),(127.0.0.1/127.0.0.1:39345:39345)] 2024-11-15T09:34:21,625 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T09:34:21,625 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:34:21,628 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:34:21,629 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:34:21,664 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:34:21,687 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T09:34:21,690 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:34:21,693 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:34:21,693 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:34:21,696 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T09:34:21,697 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:34:21,698 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:34:21,698 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:34:21,701 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T09:34:21,701 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:34:21,702 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:34:21,702 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:34:21,705 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T09:34:21,705 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:34:21,706 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:34:21,707 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:34:21,710 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:34:21,712 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:34:21,717 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:34:21,718 DEBUG [master/791f12959b23:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:34:21,721 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T09:34:21,725 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:34:21,729 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T09:34:21,731 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=748943, jitterRate=-0.04766973853111267}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T09:34:21,737 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731663261641Initializing all the Stores at 1731663261643 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663261643Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663261644 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663261644Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663261644Cleaning up temporary data from old regions at 1731663261718 (+74 ms)Region opened successfully at 1731663261737 (+19 ms) 2024-11-15T09:34:21,738 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T09:34:21,769 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2800d548, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=791f12959b23/172.17.0.2:0 2024-11-15T09:34:21,796 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T09:34:21,805 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T09:34:21,805 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T09:34:21,808 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T09:34:21,809 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-15T09:34:21,814 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-15T09:34:21,814 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T09:34:21,837 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T09:34:21,844 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T09:34:21,909 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T09:34:21,914 INFO [master/791f12959b23:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T09:34:21,918 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T09:34:22,016 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T09:34:22,019 INFO [master/791f12959b23:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T09:34:22,024 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T09:34:22,102 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T09:34:22,104 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T09:34:22,163 DEBUG 
[master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T09:34:22,186 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T09:34:22,198 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T09:34:22,209 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T09:34:22,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T09:34:22,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:34:22,209 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:34:22,212 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=791f12959b23,37481,1731663259873, sessionid=0x1013dd8614b0000, setting cluster-up flag (Was=false) 2024-11-15T09:34:22,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:34:22,240 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:34:22,272 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T09:34:22,276 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=791f12959b23,37481,1731663259873 2024-11-15T09:34:22,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:34:22,299 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:34:22,335 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T09:34:22,340 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=791f12959b23,37481,1731663259873 2024-11-15T09:34:22,349 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T09:34:22,375 INFO [RS:0;791f12959b23:39761 {}] regionserver.HRegionServer(746): ClusterId : 9cd43f53-8228-4e31-b0a5-8d4391cfb640 2024-11-15T09:34:22,377 DEBUG [RS:0;791f12959b23:39761 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T09:34:22,390 DEBUG [RS:0;791f12959b23:39761 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T09:34:22,390 DEBUG [RS:0;791f12959b23:39761 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T09:34:22,400 DEBUG [RS:0;791f12959b23:39761 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T09:34:22,401 DEBUG [RS:0;791f12959b23:39761 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42af7369, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=791f12959b23/172.17.0.2:0 2024-11-15T09:34:22,416 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T09:34:22,418 DEBUG [RS:0;791f12959b23:39761 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;791f12959b23:39761 2024-11-15T09:34:22,421 INFO [RS:0;791f12959b23:39761 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T09:34:22,421 INFO [RS:0;791f12959b23:39761 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T09:34:22,421 DEBUG [RS:0;791f12959b23:39761 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-15T09:34:22,424 INFO [RS:0;791f12959b23:39761 {}] regionserver.HRegionServer(2659): reportForDuty to master=791f12959b23,37481,1731663259873 with port=39761, startcode=1731663260659 2024-11-15T09:34:22,425 INFO [master/791f12959b23:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T09:34:22,431 INFO [master/791f12959b23:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
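The NodeCreated and NodeChildrenChanged notifications above are delivered by HBase's internal ZKWatcher. The same events can be observed with a plain ZooKeeper client; a standalone sketch (the quorum address is copied from the log, and this is only an external observer, not how ZKWatcher is implemented):

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class HBaseZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Session watcher prints every event the server pushes for watches we register.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56657", 30_000, (WatchedEvent event) ->
        System.out.println("type=" + event.getType() + " path=" + event.getPath()));
    // Registers a one-shot children watch on the base znode, like the /hbase events above.
    zk.getChildren("/hbase", true);
    Thread.sleep(60_000);  // keep the session open long enough to see notifications
    zk.close();
  }
}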
2024-11-15T09:34:22,434 DEBUG [RS:0;791f12959b23:39761 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T09:34:22,436 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 791f12959b23,37481,1731663259873 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T09:34:22,442 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:34:22,443 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:34:22,443 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:34:22,443 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:34:22,443 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/791f12959b23:0, corePoolSize=10, maxPoolSize=10 2024-11-15T09:34:22,443 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:34:22,444 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/791f12959b23:0, corePoolSize=2, maxPoolSize=2 2024-11-15T09:34:22,444 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:34:22,445 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731663292445 2024-11-15T09:34:22,447 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T09:34:22,448 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:34:22,448 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T09:34:22,449 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T09:34:22,453 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T09:34:22,453 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T09:34:22,454 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T09:34:22,454 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T09:34:22,454 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:34:22,455 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T09:34:22,455 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
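Which delegate classes the LogsCleaner chore above runs is driven by configuration rather than code. A hedged sketch of supplying that list programmatically (the plugin classes are the ones named in the log; the exact property value shown is an assumption about a typical setup):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class LogCleanerPluginsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Comma-separated log-cleaner delegates loaded by the master's LogsCleaner chore.
    conf.set("hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
            + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
    System.out.println(conf.get("hbase.master.logcleaner.plugins"));
  }
}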
2024-11-15T09:34:22,459 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T09:34:22,461 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T09:34:22,461 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T09:34:22,463 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T09:34:22,464 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T09:34:22,466 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663262465,5,FailOnTimeoutGroup] 2024-11-15T09:34:22,467 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663262466,5,FailOnTimeoutGroup] 2024-11-15T09:34:22,467 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T09:34:22,467 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T09:34:22,469 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T09:34:22,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741831_1007 (size=1321) 2024-11-15T09:34:22,469 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
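Two of the startup messages point directly at opt-in knobs: the StoreHotnessProtector line earlier asks for hbase.region.store.parallel.put.limit > 0, and the HMaster message above names hbase.regions.recovery.store.file.ref.count. A small sketch that turns both on, with illustrative values only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class OptInKnobsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Any value > 0 enables StoreHotnessProtector (see the earlier message); 10 is illustrative.
    conf.setInt("hbase.region.store.parallel.put.limit", 10);
    // Any value > 0 enables reopening regions with very high storeFileRefCount; 256 is illustrative.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
    System.out.println(conf.getInt("hbase.region.store.parallel.put.limit", 0));
    System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
  }
}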
2024-11-15T09:34:22,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741831_1007 (size=1321) 2024-11-15T09:34:22,472 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T09:34:22,473 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7 2024-11-15T09:34:22,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741832_1008 (size=32) 2024-11-15T09:34:22,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741832_1008 (size=32) 2024-11-15T09:34:22,490 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:34:22,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T09:34:22,495 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, 
incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T09:34:22,496 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:34:22,497 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:34:22,497 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T09:34:22,500 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T09:34:22,500 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:34:22,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:34:22,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T09:34:22,504 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T09:34:22,505 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:34:22,506 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:34:22,507 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T09:34:22,510 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T09:34:22,510 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:34:22,512 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:34:22,512 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T09:34:22,513 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44917, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T09:34:22,514 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740 2024-11-15T09:34:22,515 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740 2024-11-15T09:34:22,518 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T09:34:22,518 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T09:34:22,520 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
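The CompactionConfiguration lines above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, major period 604800000) correspond to standard tuning properties. A hedged sketch, with the usual property names assumed and values simply echoing the logged defaults:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);         // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);        // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);  // ratio
    conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);  // major period (7 days, ms)
    System.out.println(conf.getInt("hbase.hstore.compaction.min", -1));
  }
}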
2024-11-15T09:34:22,521 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37481 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 791f12959b23,39761,1731663260659 2024-11-15T09:34:22,523 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T09:34:22,523 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37481 {}] master.ServerManager(517): Registering regionserver=791f12959b23,39761,1731663260659 2024-11-15T09:34:22,527 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T09:34:22,529 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=715373, jitterRate=-0.09035708010196686}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T09:34:22,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731663262490Initializing all the Stores at 1731663262492 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663262492Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663262492Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663262492Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663262493 (+1 ms)Cleaning up temporary data from old regions at 1731663262518 (+25 ms)Region opened successfully at 1731663262534 (+16 ms) 2024-11-15T09:34:22,535 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T09:34:22,535 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T09:34:22,535 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T09:34:22,535 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T09:34:22,535 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region 
hbase:meta,,1.1588230740 2024-11-15T09:34:22,537 DEBUG [RS:0;791f12959b23:39761 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7 2024-11-15T09:34:22,537 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T09:34:22,537 DEBUG [RS:0;791f12959b23:39761 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35471 2024-11-15T09:34:22,537 DEBUG [RS:0;791f12959b23:39761 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T09:34:22,537 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731663262535Disabling compacts and flushes for region at 1731663262535Disabling writes for close at 1731663262535Writing region close event to WAL at 1731663262536 (+1 ms)Closed at 1731663262537 (+1 ms) 2024-11-15T09:34:22,541 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:34:22,541 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T09:34:22,549 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T09:34:22,558 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T09:34:22,561 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T09:34:22,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T09:34:22,640 DEBUG [RS:0;791f12959b23:39761 {}] zookeeper.ZKUtil(111): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/791f12959b23,39761,1731663260659 2024-11-15T09:34:22,640 WARN [RS:0;791f12959b23:39761 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
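At this point the master has registered the region server and queued the hbase:meta assignment. Once startup completes, the same state is visible to any client through the Admin API; a minimal sketch (assumes a reachable cluster configuration on the classpath):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterStatusSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      System.out.println("master: " + metrics.getMasterName());
      System.out.println("live region servers: " + metrics.getLiveServerMetrics().keySet());
    }
  }
}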
2024-11-15T09:34:22,641 INFO [RS:0;791f12959b23:39761 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:34:22,641 DEBUG [RS:0;791f12959b23:39761 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659 2024-11-15T09:34:22,644 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [791f12959b23,39761,1731663260659] 2024-11-15T09:34:22,667 INFO [RS:0;791f12959b23:39761 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T09:34:22,684 INFO [RS:0;791f12959b23:39761 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T09:34:22,688 INFO [RS:0;791f12959b23:39761 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T09:34:22,688 INFO [RS:0;791f12959b23:39761 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:34:22,689 INFO [RS:0;791f12959b23:39761 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T09:34:22,694 INFO [RS:0;791f12959b23:39761 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T09:34:22,695 INFO [RS:0;791f12959b23:39761 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T09:34:22,695 DEBUG [RS:0;791f12959b23:39761 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:34:22,696 DEBUG [RS:0;791f12959b23:39761 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:34:22,696 DEBUG [RS:0;791f12959b23:39761 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:34:22,696 DEBUG [RS:0;791f12959b23:39761 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:34:22,696 DEBUG [RS:0;791f12959b23:39761 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:34:22,696 DEBUG [RS:0;791f12959b23:39761 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/791f12959b23:0, corePoolSize=2, maxPoolSize=2 2024-11-15T09:34:22,696 DEBUG [RS:0;791f12959b23:39761 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:34:22,697 DEBUG [RS:0;791f12959b23:39761 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:34:22,697 DEBUG [RS:0;791f12959b23:39761 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/791f12959b23:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T09:34:22,697 DEBUG [RS:0;791f12959b23:39761 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:34:22,697 DEBUG [RS:0;791f12959b23:39761 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:34:22,697 DEBUG [RS:0;791f12959b23:39761 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:34:22,697 DEBUG [RS:0;791f12959b23:39761 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/791f12959b23:0, corePoolSize=3, maxPoolSize=3 2024-11-15T09:34:22,698 DEBUG [RS:0;791f12959b23:39761 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0, corePoolSize=3, maxPoolSize=3 2024-11-15T09:34:22,698 INFO [RS:0;791f12959b23:39761 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T09:34:22,698 INFO [RS:0;791f12959b23:39761 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T09:34:22,699 INFO [RS:0;791f12959b23:39761 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:34:22,699 INFO [RS:0;791f12959b23:39761 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T09:34:22,699 INFO [RS:0;791f12959b23:39761 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T09:34:22,699 INFO [RS:0;791f12959b23:39761 {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,39761,1731663260659-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T09:34:22,712 WARN [791f12959b23:37481 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-15T09:34:22,716 INFO [RS:0;791f12959b23:39761 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T09:34:22,718 INFO [RS:0;791f12959b23:39761 {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,39761,1731663260659-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:34:22,718 INFO [RS:0;791f12959b23:39761 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:34:22,718 INFO [RS:0;791f12959b23:39761 {}] regionserver.Replication(171): 791f12959b23,39761,1731663260659 started 2024-11-15T09:34:22,734 INFO [RS:0;791f12959b23:39761 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
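The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M; those limits are derived from heap-fraction settings. A sketch under the assumption that the usual property names apply (both keys and values here are assumptions, not taken from this run):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the region server heap usable by all memstores (assumed key).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Low-water mark as a fraction of the limit above (assumed key).
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    System.out.println(conf.getFloat("hbase.regionserver.global.memstore.size", -1f));
  }
}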
2024-11-15T09:34:22,735 INFO [RS:0;791f12959b23:39761 {}] regionserver.HRegionServer(1482): Serving as 791f12959b23,39761,1731663260659, RpcServer on 791f12959b23/172.17.0.2:39761, sessionid=0x1013dd8614b0001 2024-11-15T09:34:22,735 DEBUG [RS:0;791f12959b23:39761 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T09:34:22,735 DEBUG [RS:0;791f12959b23:39761 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 791f12959b23,39761,1731663260659 2024-11-15T09:34:22,736 DEBUG [RS:0;791f12959b23:39761 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '791f12959b23,39761,1731663260659' 2024-11-15T09:34:22,736 DEBUG [RS:0;791f12959b23:39761 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T09:34:22,737 DEBUG [RS:0;791f12959b23:39761 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T09:34:22,737 DEBUG [RS:0;791f12959b23:39761 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T09:34:22,737 DEBUG [RS:0;791f12959b23:39761 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T09:34:22,737 DEBUG [RS:0;791f12959b23:39761 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 791f12959b23,39761,1731663260659 2024-11-15T09:34:22,738 DEBUG [RS:0;791f12959b23:39761 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '791f12959b23,39761,1731663260659' 2024-11-15T09:34:22,738 DEBUG [RS:0;791f12959b23:39761 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T09:34:22,738 DEBUG [RS:0;791f12959b23:39761 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T09:34:22,739 DEBUG [RS:0;791f12959b23:39761 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T09:34:22,739 INFO [RS:0;791f12959b23:39761 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T09:34:22,739 INFO [RS:0;791f12959b23:39761 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
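Both quota managers report themselves disabled above. Enabling them and defining a throttle is done through configuration plus the Admin quota API; a hedged sketch (the enable switch, throttle type, table name and limit are assumptions about a typical setup, not anything this test configures):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.ThrottleType;

public class QuotaSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.quota.enabled", true);  // assumed switch; quotas are off by default per the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Throttle a hypothetical table to 1000 requests per second.
      admin.setQuota(QuotaSettingsFactory.throttleTable(
          TableName.valueOf("exampleTable"), ThrottleType.REQUEST_NUMBER, 1000, TimeUnit.SECONDS));
    }
  }
}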
2024-11-15T09:34:22,852 INFO [RS:0;791f12959b23:39761 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C39761%2C1731663260659, suffix=, logDir=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659, archiveDir=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/oldWALs, maxLogs=32 2024-11-15T09:34:22,855 INFO [RS:0;791f12959b23:39761 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C39761%2C1731663260659.1731663262855 2024-11-15T09:34:22,992 INFO [RS:0;791f12959b23:39761 {}] wal.AbstractFSWAL(1368): Slow sync cost: 134 ms, current pipeline: [] 2024-11-15T09:34:22,992 INFO [RS:0;791f12959b23:39761 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663262855 2024-11-15T09:34:22,994 DEBUG [RS:0;791f12959b23:39761 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39345:39345),(127.0.0.1/127.0.0.1:38201:38201)] 2024-11-15T09:34:23,217 DEBUG [791f12959b23:37481 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T09:34:23,231 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=791f12959b23,39761,1731663260659 2024-11-15T09:34:23,238 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 791f12959b23,39761,1731663260659, state=OPENING 2024-11-15T09:34:23,251 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T09:34:23,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:34:23,261 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:34:23,263 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:34:23,263 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:34:23,266 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T09:34:23,269 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=791f12959b23,39761,1731663260659}] 2024-11-15T09:34:23,449 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T09:34:23,452 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50625, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=AdminService 2024-11-15T09:34:23,462 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T09:34:23,462 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:34:23,466 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C39761%2C1731663260659.meta, suffix=.meta, logDir=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659, archiveDir=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/oldWALs, maxLogs=32 2024-11-15T09:34:23,468 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C39761%2C1731663260659.meta.1731663263468.meta 2024-11-15T09:34:23,476 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.meta.1731663263468.meta 2024-11-15T09:34:23,479 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38201:38201),(127.0.0.1/127.0.0.1:39345:39345)] 2024-11-15T09:34:23,482 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T09:34:23,484 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T09:34:23,486 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T09:34:23,490 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
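Editor's note: the open-meta handler above loads org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint because it is declared in the hbase:meta table descriptor ("Loaded coprocessor ... from HTD of hbase:meta"). As a hedged illustration, and not something this test does, a coprocessor can be attached to a user table the same way by naming its class in the descriptor before the table is created; the table name "demo_table" and family "info" below are placeholders.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorOnDescriptorSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Hypothetical table; the endpoint class is the one the log shows loaded for hbase:meta.
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("demo_table"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
              .build());
        }
      }
    }
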
2024-11-15T09:34:23,494 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T09:34:23,495 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:34:23,495 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T09:34:23,495 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T09:34:23,498 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T09:34:23,500 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T09:34:23,500 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:34:23,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:34:23,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T09:34:23,504 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T09:34:23,504 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:34:23,506 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:34:23,506 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T09:34:23,508 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T09:34:23,508 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:34:23,509 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:34:23,510 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T09:34:23,512 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T09:34:23,512 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:34:23,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
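Editor's note: each store opener above prints the effective CompactionConfiguration for a meta column family (minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, and so on). A hedged sketch of where such numbers usually come from; the key names below are the commonly documented compaction settings rather than anything this run sets explicitly, and the values shown are the defaults the log reflects.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionKnobsSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Commonly documented keys behind the CompactionConfiguration values printed above.
        conf.setInt("hbase.hstore.compaction.min", 3);      // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);     // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        System.out.println("hbase.hstore.compaction.min = "
            + conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }
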
2024-11-15T09:34:23,513 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T09:34:23,515 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740 2024-11-15T09:34:23,518 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740 2024-11-15T09:34:23,522 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T09:34:23,522 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T09:34:23,523 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T09:34:23,525 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T09:34:23,527 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=812641, jitterRate=0.03332667052745819}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T09:34:23,527 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T09:34:23,528 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731663263496Writing region info on filesystem at 1731663263496Initializing all the Stores at 1731663263498 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663263498Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663263498Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663263498Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663263498Cleaning up temporary data from old regions at 1731663263522 (+24 ms)Running coprocessor post-open hooks at 1731663263527 (+5 ms)Region opened successfully at 1731663263528 (+1 ms) 2024-11-15T09:34:23,535 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731663263441 2024-11-15T09:34:23,547 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T09:34:23,547 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T09:34:23,549 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=791f12959b23,39761,1731663260659 2024-11-15T09:34:23,551 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 791f12959b23,39761,1731663260659, state=OPEN 2024-11-15T09:34:23,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T09:34:23,587 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T09:34:23,588 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:34:23,588 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:34:23,588 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=791f12959b23,39761,1731663260659 2024-11-15T09:34:23,597 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T09:34:23,597 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=791f12959b23,39761,1731663260659 in 319 msec 2024-11-15T09:34:23,604 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T09:34:23,604 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.0510 sec 2024-11-15T09:34:23,606 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:34:23,606 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T09:34:23,624 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T09:34:23,625 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=791f12959b23,39761,1731663260659, seqNum=-1] 2024-11-15T09:34:23,643 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T09:34:23,645 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52331, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T09:34:23,664 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2840 sec 2024-11-15T09:34:23,664 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731663263664, completionTime=-1 2024-11-15T09:34:23,667 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T09:34:23,667 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T09:34:23,692 INFO [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T09:34:23,692 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731663323692 2024-11-15T09:34:23,692 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731663383692 2024-11-15T09:34:23,692 INFO [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 25 msec 2024-11-15T09:34:23,696 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,37481,1731663259873-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:34:23,696 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,37481,1731663259873-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:34:23,696 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,37481,1731663259873-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:34:23,698 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-791f12959b23:37481, period=300000, unit=MILLISECONDS is enabled. 
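Editor's note: InitMetaProcedure above creates the built-in 'default' and 'hbase' namespaces before the master reports "Master has completed initialization". A hedged client-side sketch for confirming they exist once the cluster is up, using only the public Admin API; the connection settings are whatever the surrounding test configuration provides.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespacesSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Expect at least the two namespaces InitMetaProcedure created: 'default' and 'hbase'.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println("namespace: " + ns.getName());
          }
        }
      }
    }
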
2024-11-15T09:34:23,698 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T09:34:23,699 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T09:34:23,704 DEBUG [master/791f12959b23:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T09:34:23,723 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.871sec 2024-11-15T09:34:23,724 INFO [master/791f12959b23:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T09:34:23,726 INFO [master/791f12959b23:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T09:34:23,727 INFO [master/791f12959b23:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T09:34:23,728 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T09:34:23,728 INFO [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T09:34:23,729 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,37481,1731663259873-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T09:34:23,730 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,37481,1731663259873-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T09:34:23,739 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T09:34:23,740 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T09:34:23,740 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,37481,1731663259873-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T09:34:23,784 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33c17658, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:34:23,786 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-15T09:34:23,786 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-15T09:34:23,789 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 791f12959b23,37481,-1 for getting cluster id 2024-11-15T09:34:23,792 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T09:34:23,799 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9cd43f53-8228-4e31-b0a5-8d4391cfb640' 2024-11-15T09:34:23,802 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T09:34:23,803 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9cd43f53-8228-4e31-b0a5-8d4391cfb640" 2024-11-15T09:34:23,805 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2461ffc6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:34:23,805 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [791f12959b23,37481,-1] 2024-11-15T09:34:23,807 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T09:34:23,809 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:34:23,810 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48874, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T09:34:23,813 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35d9e869, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:34:23,813 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T09:34:23,820 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=791f12959b23,39761,1731663260659, seqNum=-1] 2024-11-15T09:34:23,820 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T09:34:23,823 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55886, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T09:34:23,840 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=791f12959b23,37481,1731663259873 2024-11-15T09:34:23,841 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:34:23,847 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T09:34:23,851 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T09:34:23,856 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 791f12959b23,37481,1731663259873 2024-11-15T09:34:23,859 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@28c43b06 2024-11-15T09:34:23,860 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T09:34:23,863 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48878, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T09:34:23,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37481 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T09:34:23,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37481 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
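Editor's note: the two TableDescriptorChecker warnings above are expected here. The test configuration shrinks "hbase.hregion.max.filesize" (786432) and "hbase.hregion.memstore.flush.size" (8192) so flushes and log rolls happen quickly, and the balancer is switched off (the balanceSwitch=false line). A hedged sketch of that setup, assuming the 3.x HBaseTestingUtil class seen in the log keeps the startMiniCluster()/getAdmin()/shutdownMiniCluster() entry points of the older HBaseTestingUtility.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class SmallRegionMiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        Configuration conf = util.getConfiguration();
        // The same two keys the TableDescriptorChecker warnings quote, set to the
        // values seen in the log so the test flushes and rolls aggressively.
        conf.setLong("hbase.hregion.max.filesize", 786432L);
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L);
        util.startMiniCluster();
        try {
          // Mirrors the "set balanceSwitch=false" line: disable the balancer for the test.
          util.getAdmin().balancerSwitch(false, true);
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }
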
2024-11-15T09:34:23,886 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37481 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T09:34:23,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37481 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-15T09:34:23,898 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T09:34:23,900 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37481 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-15T09:34:23,900 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:34:23,903 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T09:34:23,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37481 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T09:34:23,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741835_1011 (size=389) 2024-11-15T09:34:23,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741835_1011 (size=389) 2024-11-15T09:34:24,361 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c6a47ca4c8b14d4f34947551cd126497, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7 2024-11-15T09:34:24,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741836_1012 (size=72) 2024-11-15T09:34:24,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741836_1012 (size=72) 2024-11-15T09:34:24,373 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:34:24,373 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing c6a47ca4c8b14d4f34947551cd126497, disabling compactions & flushes 2024-11-15T09:34:24,373 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497. 2024-11-15T09:34:24,373 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497. 2024-11-15T09:34:24,374 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497. after waiting 0 ms 2024-11-15T09:34:24,374 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497. 2024-11-15T09:34:24,374 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497. 2024-11-15T09:34:24,374 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for c6a47ca4c8b14d4f34947551cd126497: Waiting for close lock at 1731663264373Disabling compacts and flushes for region at 1731663264373Disabling writes for close at 1731663264374 (+1 ms)Writing region close event to WAL at 1731663264374Closed at 1731663264374 2024-11-15T09:34:24,376 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T09:34:24,380 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731663264376"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731663264376"}]},"ts":"1731663264376"} 2024-11-15T09:34:24,385 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
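Editor's note: the CreateTableProcedure steps above serve the client request logged earlier ("create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', VERSIONS => '1', BLOOMFILTER => 'ROW', ...}"). A hedged sketch of what that client call looks like with the public API, reconstructed from the descriptor printed in the log rather than taken from the test's actual source.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Mirrors the descriptor the master logs: one 'info' family, 1 version, ROW bloom filter.
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)
                  .setBloomFilterType(BloomType.ROW)
                  .build())
              .build());
        }
      }
    }
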
2024-11-15T09:34:24,387 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T09:34:24,390 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731663264387"}]},"ts":"1731663264387"} 2024-11-15T09:34:24,394 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-15T09:34:24,396 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=c6a47ca4c8b14d4f34947551cd126497, ASSIGN}] 2024-11-15T09:34:24,399 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=c6a47ca4c8b14d4f34947551cd126497, ASSIGN 2024-11-15T09:34:24,401 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=c6a47ca4c8b14d4f34947551cd126497, ASSIGN; state=OFFLINE, location=791f12959b23,39761,1731663260659; forceNewPlan=false, retain=false 2024-11-15T09:34:24,553 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c6a47ca4c8b14d4f34947551cd126497, regionState=OPENING, regionLocation=791f12959b23,39761,1731663260659 2024-11-15T09:34:24,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=c6a47ca4c8b14d4f34947551cd126497, ASSIGN because future has completed 2024-11-15T09:34:24,564 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c6a47ca4c8b14d4f34947551cd126497, server=791f12959b23,39761,1731663260659}] 2024-11-15T09:34:24,727 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497. 
2024-11-15T09:34:24,728 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c6a47ca4c8b14d4f34947551cd126497, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497.', STARTKEY => '', ENDKEY => ''} 2024-11-15T09:34:24,729 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling c6a47ca4c8b14d4f34947551cd126497 2024-11-15T09:34:24,729 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:34:24,729 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c6a47ca4c8b14d4f34947551cd126497 2024-11-15T09:34:24,729 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c6a47ca4c8b14d4f34947551cd126497 2024-11-15T09:34:24,733 INFO [StoreOpener-c6a47ca4c8b14d4f34947551cd126497-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c6a47ca4c8b14d4f34947551cd126497 2024-11-15T09:34:24,735 INFO [StoreOpener-c6a47ca4c8b14d4f34947551cd126497-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c6a47ca4c8b14d4f34947551cd126497 columnFamilyName info 2024-11-15T09:34:24,736 DEBUG [StoreOpener-c6a47ca4c8b14d4f34947551cd126497-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:34:24,737 INFO [StoreOpener-c6a47ca4c8b14d4f34947551cd126497-1 {}] regionserver.HStore(327): Store=c6a47ca4c8b14d4f34947551cd126497/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:34:24,737 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c6a47ca4c8b14d4f34947551cd126497 2024-11-15T09:34:24,739 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497 2024-11-15T09:34:24,739 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497 2024-11-15T09:34:24,740 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c6a47ca4c8b14d4f34947551cd126497 2024-11-15T09:34:24,740 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c6a47ca4c8b14d4f34947551cd126497 2024-11-15T09:34:24,743 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c6a47ca4c8b14d4f34947551cd126497 2024-11-15T09:34:24,746 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T09:34:24,747 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c6a47ca4c8b14d4f34947551cd126497; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=835686, jitterRate=0.06263066828250885}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T09:34:24,747 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c6a47ca4c8b14d4f34947551cd126497 2024-11-15T09:34:24,748 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c6a47ca4c8b14d4f34947551cd126497: Running coprocessor pre-open hook at 1731663264729Writing region info on filesystem at 1731663264729Initializing all the Stores at 1731663264732 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663264732Cleaning up temporary data from old regions at 1731663264740 (+8 ms)Running coprocessor post-open hooks at 1731663264747 (+7 ms)Region opened successfully at 1731663264748 (+1 ms) 2024-11-15T09:34:24,750 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497., pid=6, masterSystemTime=1731663264719 2024-11-15T09:34:24,754 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497. 2024-11-15T09:34:24,754 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497. 2024-11-15T09:34:24,756 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c6a47ca4c8b14d4f34947551cd126497, regionState=OPEN, openSeqNum=2, regionLocation=791f12959b23,39761,1731663260659 2024-11-15T09:34:24,760 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c6a47ca4c8b14d4f34947551cd126497, server=791f12959b23,39761,1731663260659 because future has completed 2024-11-15T09:34:24,767 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T09:34:24,767 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c6a47ca4c8b14d4f34947551cd126497, server=791f12959b23,39761,1731663260659 in 198 msec 2024-11-15T09:34:24,772 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T09:34:24,772 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=c6a47ca4c8b14d4f34947551cd126497, ASSIGN in 371 msec 2024-11-15T09:34:24,774 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T09:34:24,774 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731663264774"}]},"ts":"1731663264774"} 2024-11-15T09:34:24,778 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-15T09:34:24,780 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T09:34:24,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 891 msec 2024-11-15T09:34:29,009 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-15T09:34:29,052 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T09:34:29,054 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-15T09:34:30,228 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T09:34:30,229 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-15T09:34:30,231 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-15T09:34:30,232 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-15T09:34:30,234 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T09:34:30,234 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-15T09:34:30,234 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T09:34:30,234 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-15T09:34:34,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37481 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T09:34:34,020 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-15T09:34:34,025 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-15T09:34:34,031 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-15T09:34:34,032 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497. 
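Editor's note: with the CREATE operation reported complete above, the test's next step (visible in the locator and flush lines that follow) is writing rows such as row0001 into the 'info' family. A hedged sketch of that write path, including the region lookup the AsyncNonMetaRegionLocator line below reports; the row key and family come from the log, while the qualifier and value are placeholders.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteRowsSketch {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(name)) {
          // The locator line below shows the client resolving row0001 to the single region.
          HRegionLocation loc =
              conn.getRegionLocator(name).getRegionLocation(Bytes.toBytes("row0001"));
          System.out.println("row0001 is served by " + loc.getServerName());
          // Writes like this fill the memstore that gets flushed at 09:34:46 below.
          Put put = new Put(Bytes.toBytes("row0001"));
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), new byte[1024]); // placeholder qualifier/value
          table.put(put);
        }
      }
    }
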
2024-11-15T09:34:34,033 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C39761%2C1731663260659.1731663274032 2024-11-15T09:34:34,042 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:34:34,043 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:34:34,043 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:34:34,043 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:34:34,043 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:34:34,044 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663262855 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663274032 2024-11-15T09:34:34,045 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39345:39345),(127.0.0.1/127.0.0.1:38201:38201)] 2024-11-15T09:34:34,046 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663262855 is not closed yet, will try archiving it next time 2024-11-15T09:34:34,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741833_1009 (size=451) 2024-11-15T09:34:34,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741833_1009 (size=451) 2024-11-15T09:34:34,050 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663262855 to hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/oldWALs/791f12959b23%2C39761%2C1731663260659.1731663262855 2024-11-15T09:34:34,058 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497., hostname=791f12959b23,39761,1731663260659, seqNum=2] 2024-11-15T09:34:46,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39761 {}] regionserver.HRegion(8855): Flush requested on c6a47ca4c8b14d4f34947551cd126497 2024-11-15T09:34:46,094 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c6a47ca4c8b14d4f34947551cd126497 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T09:34:46,161 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/.tmp/info/2b685daea6cd4bb491ec2075b2220af0 is 1080, key is row0001/info:/1731663274062/Put/seqid=0 2024-11-15T09:34:46,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741838_1014 (size=12509) 2024-11-15T09:34:46,176 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741838_1014 (size=12509) 2024-11-15T09:34:46,177 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/.tmp/info/2b685daea6cd4bb491ec2075b2220af0 2024-11-15T09:34:46,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/.tmp/info/2b685daea6cd4bb491ec2075b2220af0 as hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/2b685daea6cd4bb491ec2075b2220af0 2024-11-15T09:34:46,251 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/2b685daea6cd4bb491ec2075b2220af0, entries=7, sequenceid=11, filesize=12.2 K 2024-11-15T09:34:46,261 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for c6a47ca4c8b14d4f34947551cd126497 in 165ms, sequenceid=11, compaction requested=false 2024-11-15T09:34:46,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c6a47ca4c8b14d4f34947551cd126497: 2024-11-15T09:34:48,989 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-15T09:34:54,104 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C39761%2C1731663260659.1731663294104 2024-11-15T09:34:54,313 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK]] 2024-11-15T09:34:54,313 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:34:54,313 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:34:54,313 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:34:54,314 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:34:54,314 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:34:54,314 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663274032 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663294104 2024-11-15T09:34:54,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741837_1013 (size=12399) 2024-11-15T09:34:54,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741837_1013 (size=12399) 2024-11-15T09:34:54,323 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39345:39345),(127.0.0.1/127.0.0.1:38201:38201)] 2024-11-15T09:34:54,526 INFO [FSHLog-0-hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7-prefix:791f12959b23,39761,1731663260659 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK]] 2024-11-15T09:34:56,729 INFO [FSHLog-0-hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7-prefix:791f12959b23,39761,1731663260659 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK]] 2024-11-15T09:34:58,934 INFO [FSHLog-0-hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7-prefix:791f12959b23,39761,1731663260659 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK]] 2024-11-15T09:35:01,138 INFO [FSHLog-0-hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7-prefix:791f12959b23,39761,1731663260659 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK]] 2024-11-15T09:35:01,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39761 {}] 
regionserver.HRegion(8855): Flush requested on c6a47ca4c8b14d4f34947551cd126497 2024-11-15T09:35:01,139 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c6a47ca4c8b14d4f34947551cd126497 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T09:35:01,341 INFO [FSHLog-0-hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7-prefix:791f12959b23,39761,1731663260659 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK]] 2024-11-15T09:35:01,348 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/.tmp/info/8169c77336ff438985d845e070410ca0 is 1080, key is row0008/info:/1731663288093/Put/seqid=0 2024-11-15T09:35:01,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741840_1016 (size=12509) 2024-11-15T09:35:01,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741840_1016 (size=12509) 2024-11-15T09:35:01,358 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/.tmp/info/8169c77336ff438985d845e070410ca0 2024-11-15T09:35:01,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/.tmp/info/8169c77336ff438985d845e070410ca0 as hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/8169c77336ff438985d845e070410ca0 2024-11-15T09:35:01,378 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/8169c77336ff438985d845e070410ca0, entries=7, sequenceid=21, filesize=12.2 K 2024-11-15T09:35:01,580 INFO [FSHLog-0-hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7-prefix:791f12959b23,39761,1731663260659 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK]] 2024-11-15T09:35:01,580 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for c6a47ca4c8b14d4f34947551cd126497 in 441ms, sequenceid=21, compaction requested=false 2024-11-15T09:35:01,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c6a47ca4c8b14d4f34947551cd126497: 2024-11-15T09:35:01,581 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-15T09:35:01,581 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:35:01,582 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/2b685daea6cd4bb491ec2075b2220af0 because midkey is the same as first or last row 2024-11-15T09:35:03,344 INFO [FSHLog-0-hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7-prefix:791f12959b23,39761,1731663260659 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK]] 2024-11-15T09:35:04,217 INFO [master/791f12959b23:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-15T09:35:04,217 INFO [master/791f12959b23:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-15T09:35:05,552 INFO [FSHLog-0-hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7-prefix:791f12959b23,39761,1731663260659 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK]] 2024-11-15T09:35:05,555 WARN [FSHLog-0-hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7-prefix:791f12959b23,39761,1731663260659 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK]] 2024-11-15T09:35:05,557 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 791f12959b23%2C39761%2C1731663260659:(num 1731663294104) roll requested 2024-11-15T09:35:05,558 INFO [regionserver/791f12959b23:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C39761%2C1731663260659.1731663305558 2024-11-15T09:35:05,801 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 212 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK]] 2024-11-15T09:35:05,801 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:05,802 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:05,802 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:05,802 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:05,802 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:05,802 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663294104 with entries=8, filesize=7.55 KB; new WAL 
/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663305558 2024-11-15T09:35:05,803 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38201:38201),(127.0.0.1/127.0.0.1:39345:39345)] 2024-11-15T09:35:05,803 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663294104 is not closed yet, will try archiving it next time 2024-11-15T09:35:05,803 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663274032 to hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/oldWALs/791f12959b23%2C39761%2C1731663260659.1731663274032 2024-11-15T09:35:05,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741839_1015 (size=7739) 2024-11-15T09:35:05,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741839_1015 (size=7739) 2024-11-15T09:35:07,758 INFO [FSHLog-0-hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7-prefix:791f12959b23,39761,1731663260659 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK]] 2024-11-15T09:35:09,729 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c6a47ca4c8b14d4f34947551cd126497, had cached 0 bytes from a total of 25018 2024-11-15T09:35:09,965 INFO [FSHLog-0-hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7-prefix:791f12959b23,39761,1731663260659 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK]] 2024-11-15T09:35:12,170 INFO [FSHLog-0-hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7-prefix:791f12959b23,39761,1731663260659 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK]] 2024-11-15T09:35:14,378 INFO [FSHLog-0-hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7-prefix:791f12959b23,39761,1731663260659 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK]] 2024-11-15T09:35:16,381 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T09:35:16,381 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C39761%2C1731663260659.1731663316381 
2024-11-15T09:35:18,990 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T09:35:21,390 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK]] 2024-11-15T09:35:21,392 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK]] 2024-11-15T09:35:21,392 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 791f12959b23%2C39761%2C1731663260659:(num 1731663316381) roll requested 2024-11-15T09:35:21,392 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:21,393 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:21,393 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:21,393 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:21,393 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:21,393 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663305558 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663316381 2024-11-15T09:35:21,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741841_1017 (size=4753) 2024-11-15T09:35:21,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741841_1017 (size=4753) 2024-11-15T09:35:21,405 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38201:38201),(127.0.0.1/127.0.0.1:39345:39345)] 2024-11-15T09:35:21,405 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663305558 is not closed yet, will try archiving it next time 2024-11-15T09:35:21,405 INFO [regionserver/791f12959b23:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C39761%2C1731663260659.1731663321405 2024-11-15T09:35:26,408 INFO [FSHLog-0-hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7-prefix:791f12959b23,39761,1731663260659 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK]] 2024-11-15T09:35:26,408 WARN [FSHLog-0-hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7-prefix:791f12959b23,39761,1731663260659 {}] wal.AbstractFSWAL(1374): Requesting 
log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK]] 2024-11-15T09:35:26,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39761 {}] regionserver.HRegion(8855): Flush requested on c6a47ca4c8b14d4f34947551cd126497 2024-11-15T09:35:26,409 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c6a47ca4c8b14d4f34947551cd126497 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T09:35:26,420 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5008 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK]] 2024-11-15T09:35:26,420 WARN [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5008 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK]] 2024-11-15T09:35:28,410 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T09:35:31,412 INFO [FSHLog-0-hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7-prefix:791f12959b23,39761,1731663260659 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK]] 2024-11-15T09:35:31,412 WARN [FSHLog-0-hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7-prefix:791f12959b23,39761,1731663260659 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK], DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK]] 2024-11-15T09:35:31,413 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:31,413 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:31,414 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:31,414 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:31,414 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:31,415 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663316381 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663321405 2024-11-15T09:35:31,418 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39345:39345),(127.0.0.1/127.0.0.1:38201:38201)] 2024-11-15T09:35:31,418 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663316381 is not closed yet, will try archiving it next time 2024-11-15T09:35:31,418 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 791f12959b23%2C39761%2C1731663260659:(num 1731663321405) roll requested 2024-11-15T09:35:31,419 INFO [regionserver/791f12959b23:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C39761%2C1731663260659.1731663331418 2024-11-15T09:35:31,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741842_1018 (size=1569) 2024-11-15T09:35:31,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741842_1018 (size=1569) 2024-11-15T09:35:31,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/.tmp/info/2a63ff928aa741a19f2c7e32f9424126 is 1080, key is row0015/info:/1731663303141/Put/seqid=0 2024-11-15T09:35:31,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741844_1020 (size=12509) 2024-11-15T09:35:31,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741844_1020 (size=12509) 2024-11-15T09:35:31,430 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/.tmp/info/2a63ff928aa741a19f2c7e32f9424126 2024-11-15T09:35:31,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/.tmp/info/2a63ff928aa741a19f2c7e32f9424126 as hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/2a63ff928aa741a19f2c7e32f9424126 2024-11-15T09:35:31,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/2a63ff928aa741a19f2c7e32f9424126, entries=7, sequenceid=31, filesize=12.2 K 2024-11-15T09:35:36,433 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5008 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK]] 2024-11-15T09:35:36,433 WARN [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5008 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK], 
DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK]] 2024-11-15T09:35:36,453 INFO [FSHLog-0-hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7-prefix:791f12959b23,39761,1731663260659 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK]] 2024-11-15T09:35:36,454 WARN [FSHLog-0-hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7-prefix:791f12959b23,39761,1731663260659 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32973,DS-d9e1f711-328c-45ec-ba2e-cd2db2435942,DISK], DatanodeInfoWithStorage[127.0.0.1:33343,DS-9d19dc5e-c159-488e-aa93-2db2b763588f,DISK]] 2024-11-15T09:35:36,454 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:36,454 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:36,454 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:36,454 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for c6a47ca4c8b14d4f34947551cd126497 in 10045ms, sequenceid=31, compaction requested=true 2024-11-15T09:35:36,454 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:36,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c6a47ca4c8b14d4f34947551cd126497: 2024-11-15T09:35:36,454 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:36,455 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663321405 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663331418 2024-11-15T09:35:36,455 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-15T09:35:36,455 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:35:36,455 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/2b685daea6cd4bb491ec2075b2220af0 because midkey is the same as first or last row 2024-11-15T09:35:36,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741843_1019 (size=438) 2024-11-15T09:35:36,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741843_1019 (size=438) 2024-11-15T09:35:36,459 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39345:39345),(127.0.0.1/127.0.0.1:38201:38201)] 2024-11-15T09:35:36,459 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663321405 is not closed yet, will try archiving it next time 2024-11-15T09:35:36,459 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 791f12959b23%2C39761%2C1731663260659:(num 1731663336459) roll requested 2024-11-15T09:35:36,460 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C39761%2C1731663260659.1731663336459 2024-11-15T09:35:36,460 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663294104 to hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/oldWALs/791f12959b23%2C39761%2C1731663260659.1731663294104 2024-11-15T09:35:36,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c6a47ca4c8b14d4f34947551cd126497:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T09:35:36,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:35:36,464 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663305558 to hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/oldWALs/791f12959b23%2C39761%2C1731663260659.1731663305558 2024-11-15T09:35:36,467 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T09:35:36,468 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663316381 to hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/oldWALs/791f12959b23%2C39761%2C1731663260659.1731663316381 2024-11-15T09:35:36,471 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663321405 to hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/oldWALs/791f12959b23%2C39761%2C1731663260659.1731663321405 2024-11-15T09:35:36,473 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T09:35:36,476 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] regionserver.HStore(1541): c6a47ca4c8b14d4f34947551cd126497/info is initiating minor compaction (all files) 2024-11-15T09:35:36,476 INFO [RS:0;791f12959b23:39761-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c6a47ca4c8b14d4f34947551cd126497/info in TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497. 
2024-11-15T09:35:36,477 INFO [RS:0;791f12959b23:39761-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/2b685daea6cd4bb491ec2075b2220af0, hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/8169c77336ff438985d845e070410ca0, hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/2a63ff928aa741a19f2c7e32f9424126] into tmpdir=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/.tmp, totalSize=36.6 K 2024-11-15T09:35:36,481 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2b685daea6cd4bb491ec2075b2220af0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731663274062 2024-11-15T09:35:36,484 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8169c77336ff438985d845e070410ca0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731663288093 2024-11-15T09:35:36,485 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2a63ff928aa741a19f2c7e32f9424126, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731663303141 2024-11-15T09:35:36,496 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:36,496 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:36,497 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:36,497 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:36,498 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:36,498 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663331418 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663336459 2024-11-15T09:35:36,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741845_1021 (size=93) 2024-11-15T09:35:36,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741845_1021 (size=93) 2024-11-15T09:35:36,504 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663331418 to hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/oldWALs/791f12959b23%2C39761%2C1731663260659.1731663331418 2024-11-15T09:35:36,518 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39345:39345),(127.0.0.1/127.0.0.1:38201:38201)] 2024-11-15T09:35:36,518 INFO 
[regionserver/791f12959b23:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C39761%2C1731663260659.1731663336518 2024-11-15T09:35:36,533 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:36,534 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:36,534 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:36,534 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:36,534 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:36,534 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663336459 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663336518 2024-11-15T09:35:36,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741846_1022 (size=1258) 2024-11-15T09:35:36,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741846_1022 (size=1258) 2024-11-15T09:35:36,541 INFO [RS:0;791f12959b23:39761-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c6a47ca4c8b14d4f34947551cd126497#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T09:35:36,542 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38201:38201),(127.0.0.1/127.0.0.1:39345:39345)] 2024-11-15T09:35:36,542 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/WALs/791f12959b23,39761,1731663260659/791f12959b23%2C39761%2C1731663260659.1731663336459 is not closed yet, will try archiving it next time 2024-11-15T09:35:36,542 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/.tmp/info/4e458cc8c9f54a358094dcca4f8615e0 is 1080, key is row0001/info:/1731663274062/Put/seqid=0 2024-11-15T09:35:36,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741848_1024 (size=27710) 2024-11-15T09:35:36,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741848_1024 (size=27710) 2024-11-15T09:35:36,562 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/.tmp/info/4e458cc8c9f54a358094dcca4f8615e0 as hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/4e458cc8c9f54a358094dcca4f8615e0 2024-11-15T09:35:36,580 INFO 
[RS:0;791f12959b23:39761-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c6a47ca4c8b14d4f34947551cd126497/info of c6a47ca4c8b14d4f34947551cd126497 into 4e458cc8c9f54a358094dcca4f8615e0(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T09:35:36,580 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c6a47ca4c8b14d4f34947551cd126497: 2024-11-15T09:35:36,582 INFO [RS:0;791f12959b23:39761-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497., storeName=c6a47ca4c8b14d4f34947551cd126497/info, priority=13, startTime=1731663336456; duration=0sec 2024-11-15T09:35:36,583 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-15T09:35:36,583 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:35:36,583 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/4e458cc8c9f54a358094dcca4f8615e0 because midkey is the same as first or last row 2024-11-15T09:35:36,583 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-15T09:35:36,583 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:35:36,584 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/4e458cc8c9f54a358094dcca4f8615e0 because midkey is the same as first or last row 2024-11-15T09:35:36,584 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-15T09:35:36,584 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:35:36,584 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/4e458cc8c9f54a358094dcca4f8615e0 because midkey is the same as first or last row 2024-11-15T09:35:36,584 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:35:36,584 DEBUG [RS:0;791f12959b23:39761-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c6a47ca4c8b14d4f34947551cd126497:info 2024-11-15T09:35:48,555 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39761 {}] regionserver.HRegion(8855): Flush requested on c6a47ca4c8b14d4f34947551cd126497 2024-11-15T09:35:48,555 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c6a47ca4c8b14d4f34947551cd126497 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T09:35:48,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/.tmp/info/5c6809427486475ba45985d389fb44e8 is 1080, key is row0022/info:/1731663336520/Put/seqid=0 2024-11-15T09:35:48,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741849_1025 (size=12509) 2024-11-15T09:35:48,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741849_1025 (size=12509) 2024-11-15T09:35:48,575 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/.tmp/info/5c6809427486475ba45985d389fb44e8 2024-11-15T09:35:48,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/.tmp/info/5c6809427486475ba45985d389fb44e8 as hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/5c6809427486475ba45985d389fb44e8 2024-11-15T09:35:48,593 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/5c6809427486475ba45985d389fb44e8, entries=7, sequenceid=42, filesize=12.2 K 2024-11-15T09:35:48,594 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for c6a47ca4c8b14d4f34947551cd126497 in 39ms, sequenceid=42, compaction requested=false 2024-11-15T09:35:48,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c6a47ca4c8b14d4f34947551cd126497: 2024-11-15T09:35:48,595 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-15T09:35:48,595 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:35:48,595 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/4e458cc8c9f54a358094dcca4f8615e0 because midkey is the same as first or last row 2024-11-15T09:35:48,990 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 
3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T09:35:54,730 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c6a47ca4c8b14d4f34947551cd126497, had cached 0 bytes from a total of 40219 2024-11-15T09:35:56,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T09:35:56,573 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T09:35:56,573 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:35:56,578 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:35:56,579 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:35:56,579 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T09:35:56,579 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T09:35:56,579 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=887671035, stopped=false 2024-11-15T09:35:56,579 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=791f12959b23,37481,1731663259873 2024-11-15T09:35:56,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T09:35:56,629 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T09:35:56,629 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:35:56,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:35:56,629 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T09:35:56,630 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T09:35:56,630 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:35:56,630 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:35:56,631 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:35:56,631 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:35:56,631 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '791f12959b23,39761,1731663260659' ***** 2024-11-15T09:35:56,631 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T09:35:56,632 INFO [RS:0;791f12959b23:39761 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T09:35:56,632 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T09:35:56,632 INFO [RS:0;791f12959b23:39761 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T09:35:56,633 INFO [RS:0;791f12959b23:39761 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T09:35:56,633 INFO [RS:0;791f12959b23:39761 {}] regionserver.HRegionServer(3091): Received CLOSE for c6a47ca4c8b14d4f34947551cd126497 2024-11-15T09:35:56,634 INFO [RS:0;791f12959b23:39761 {}] regionserver.HRegionServer(959): stopping server 791f12959b23,39761,1731663260659 2024-11-15T09:35:56,634 INFO [RS:0;791f12959b23:39761 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T09:35:56,634 INFO [RS:0;791f12959b23:39761 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;791f12959b23:39761. 2024-11-15T09:35:56,634 DEBUG [RS:0;791f12959b23:39761 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:35:56,634 DEBUG [RS:0;791f12959b23:39761 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:35:56,635 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c6a47ca4c8b14d4f34947551cd126497, disabling compactions & flushes 2024-11-15T09:35:56,635 INFO [RS:0;791f12959b23:39761 {}] 
regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T09:35:56,635 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497. 2024-11-15T09:35:56,635 INFO [RS:0;791f12959b23:39761 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T09:35:56,635 INFO [RS:0;791f12959b23:39761 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T09:35:56,635 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497. 2024-11-15T09:35:56,635 INFO [RS:0;791f12959b23:39761 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T09:35:56,635 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497. after waiting 0 ms 2024-11-15T09:35:56,635 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497. 2024-11-15T09:35:56,636 INFO [RS:0;791f12959b23:39761 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-15T09:35:56,636 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing c6a47ca4c8b14d4f34947551cd126497 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-15T09:35:56,636 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T09:35:56,636 DEBUG [RS:0;791f12959b23:39761 {}] regionserver.HRegionServer(1325): Online Regions={c6a47ca4c8b14d4f34947551cd126497=TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497., 1588230740=hbase:meta,,1.1588230740} 2024-11-15T09:35:56,636 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T09:35:56,636 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T09:35:56,636 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T09:35:56,636 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T09:35:56,636 DEBUG [RS:0;791f12959b23:39761 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c6a47ca4c8b14d4f34947551cd126497 2024-11-15T09:35:56,637 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-15T09:35:56,642 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/.tmp/info/72e4d48595194cdfadd04fcc52a75158 is 1080, key is row0029/info:/1731663350560/Put/seqid=0 2024-11-15T09:35:56,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741850_1026 (size=8193) 2024-11-15T09:35:56,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741850_1026 (size=8193) 2024-11-15T09:35:56,651 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/.tmp/info/72e4d48595194cdfadd04fcc52a75158 2024-11-15T09:35:56,661 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740/.tmp/info/581ab3dae8d94bfdbcb8a76348f3db57 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497./info:regioninfo/1731663264755/Put/seqid=0 2024-11-15T09:35:56,661 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/.tmp/info/72e4d48595194cdfadd04fcc52a75158 as hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/72e4d48595194cdfadd04fcc52a75158 2024-11-15T09:35:56,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741851_1027 (size=7016) 2024-11-15T09:35:56,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741851_1027 (size=7016) 2024-11-15T09:35:56,669 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740/.tmp/info/581ab3dae8d94bfdbcb8a76348f3db57 2024-11-15T09:35:56,670 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/72e4d48595194cdfadd04fcc52a75158, entries=3, sequenceid=48, filesize=8.0 K 2024-11-15T09:35:56,672 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for c6a47ca4c8b14d4f34947551cd126497 in 37ms, sequenceid=48, compaction requested=true 2024-11-15T09:35:56,673 
DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/2b685daea6cd4bb491ec2075b2220af0, hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/8169c77336ff438985d845e070410ca0, hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/2a63ff928aa741a19f2c7e32f9424126] to archive 2024-11-15T09:35:56,676 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-15T09:35:56,679 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/2b685daea6cd4bb491ec2075b2220af0 to hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/archive/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/2b685daea6cd4bb491ec2075b2220af0 2024-11-15T09:35:56,681 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/8169c77336ff438985d845e070410ca0 to hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/archive/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/8169c77336ff438985d845e070410ca0 2024-11-15T09:35:56,683 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/2a63ff928aa741a19f2c7e32f9424126 to hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/archive/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/info/2a63ff928aa741a19f2c7e32f9424126 2024-11-15T09:35:56,697 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740/.tmp/ns/c99cbd66b1864517a5c295bc2be2df41 is 43, key is default/ns:d/1731663263650/Put/seqid=0 2024-11-15T09:35:56,694 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=791f12959b23:37481 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-15T09:35:56,699 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [2b685daea6cd4bb491ec2075b2220af0=12509, 8169c77336ff438985d845e070410ca0=12509, 2a63ff928aa741a19f2c7e32f9424126=12509] 2024-11-15T09:35:56,705 INFO [regionserver/791f12959b23:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T09:35:56,705 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/default/TestLogRolling-testSlowSyncLogRolling/c6a47ca4c8b14d4f34947551cd126497/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-15T09:35:56,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741852_1028 (size=5153) 2024-11-15T09:35:56,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741852_1028 (size=5153) 2024-11-15T09:35:56,708 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740/.tmp/ns/c99cbd66b1864517a5c295bc2be2df41 2024-11-15T09:35:56,710 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497. 2024-11-15T09:35:56,710 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c6a47ca4c8b14d4f34947551cd126497: Waiting for close lock at 1731663356634Running coprocessor pre-close hooks at 1731663356635 (+1 ms)Disabling compacts and flushes for region at 1731663356635Disabling writes for close at 1731663356635Obtaining lock to block concurrent updates at 1731663356636 (+1 ms)Preparing flush snapshotting stores in c6a47ca4c8b14d4f34947551cd126497 at 1731663356636Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731663356636Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497. 
at 1731663356637 (+1 ms)Flushing c6a47ca4c8b14d4f34947551cd126497/info: creating writer at 1731663356638 (+1 ms)Flushing c6a47ca4c8b14d4f34947551cd126497/info: appending metadata at 1731663356642 (+4 ms)Flushing c6a47ca4c8b14d4f34947551cd126497/info: closing flushed file at 1731663356642Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f38bb83: reopening flushed file at 1731663356660 (+18 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for c6a47ca4c8b14d4f34947551cd126497 in 37ms, sequenceid=48, compaction requested=true at 1731663356672 (+12 ms)Writing region close event to WAL at 1731663356700 (+28 ms)Running coprocessor post-close hooks at 1731663356708 (+8 ms)Closed at 1731663356709 (+1 ms) 2024-11-15T09:35:56,710 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731663263864.c6a47ca4c8b14d4f34947551cd126497. 2024-11-15T09:35:56,731 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740/.tmp/table/dcebff19df6a48ef9bf2d87d1ebcd317 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731663264774/Put/seqid=0 2024-11-15T09:35:56,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741853_1029 (size=5396) 2024-11-15T09:35:56,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741853_1029 (size=5396) 2024-11-15T09:35:56,737 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740/.tmp/table/dcebff19df6a48ef9bf2d87d1ebcd317 2024-11-15T09:35:56,744 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740/.tmp/info/581ab3dae8d94bfdbcb8a76348f3db57 as hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740/info/581ab3dae8d94bfdbcb8a76348f3db57 2024-11-15T09:35:56,753 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740/info/581ab3dae8d94bfdbcb8a76348f3db57, entries=10, sequenceid=11, filesize=6.9 K 2024-11-15T09:35:56,755 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740/.tmp/ns/c99cbd66b1864517a5c295bc2be2df41 as hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740/ns/c99cbd66b1864517a5c295bc2be2df41 2024-11-15T09:35:56,763 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740/ns/c99cbd66b1864517a5c295bc2be2df41, entries=2, sequenceid=11, filesize=5.0 K 2024-11-15T09:35:56,765 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740/.tmp/table/dcebff19df6a48ef9bf2d87d1ebcd317 as hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740/table/dcebff19df6a48ef9bf2d87d1ebcd317 2024-11-15T09:35:56,772 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740/table/dcebff19df6a48ef9bf2d87d1ebcd317, entries=2, sequenceid=11, filesize=5.3 K 2024-11-15T09:35:56,774 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false 2024-11-15T09:35:56,777 INFO [regionserver/791f12959b23:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-15T09:35:56,778 INFO [regionserver/791f12959b23:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-15T09:35:56,780 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-15T09:35:56,781 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T09:35:56,781 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T09:35:56,781 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731663356636Running coprocessor pre-close hooks at 1731663356636Disabling compacts and flushes for region at 1731663356636Disabling writes for close at 1731663356636Obtaining lock to block concurrent updates at 1731663356637 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731663356637Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731663356637Flushing stores of hbase:meta,,1.1588230740 at 1731663356638 (+1 ms)Flushing 1588230740/info: creating writer at 1731663356639 (+1 ms)Flushing 1588230740/info: appending metadata at 1731663356660 (+21 ms)Flushing 1588230740/info: closing flushed file at 1731663356660Flushing 1588230740/ns: creating writer at 1731663356678 (+18 ms)Flushing 1588230740/ns: appending metadata at 1731663356696 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1731663356696Flushing 1588230740/table: creating writer at 1731663356716 (+20 ms)Flushing 1588230740/table: appending metadata at 1731663356730 (+14 ms)Flushing 1588230740/table: closing flushed 
file at 1731663356730Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33c9aee2: reopening flushed file at 1731663356743 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45308b16: reopening flushed file at 1731663356754 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5062dfff: reopening flushed file at 1731663356763 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false at 1731663356774 (+11 ms)Writing region close event to WAL at 1731663356775 (+1 ms)Running coprocessor post-close hooks at 1731663356781 (+6 ms)Closed at 1731663356781 2024-11-15T09:35:56,781 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T09:35:56,837 INFO [RS:0;791f12959b23:39761 {}] regionserver.HRegionServer(976): stopping server 791f12959b23,39761,1731663260659; all regions closed. 2024-11-15T09:35:56,839 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:56,839 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:56,839 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:56,839 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:56,839 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:56,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741834_1010 (size=3066) 2024-11-15T09:35:56,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741834_1010 (size=3066) 2024-11-15T09:35:57,248 DEBUG [RS:0;791f12959b23:39761 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/oldWALs 2024-11-15T09:35:57,248 INFO [RS:0;791f12959b23:39761 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 791f12959b23%2C39761%2C1731663260659.meta:.meta(num 1731663263468) 2024-11-15T09:35:57,249 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:57,249 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:57,249 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:57,249 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:57,249 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:57,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741847_1023 (size=12695) 2024-11-15T09:35:57,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741847_1023 (size=12695) 2024-11-15T09:35:57,257 DEBUG [RS:0;791f12959b23:39761 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/oldWALs 2024-11-15T09:35:57,257 INFO [RS:0;791f12959b23:39761 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 791f12959b23%2C39761%2C1731663260659:(num 1731663336518) 2024-11-15T09:35:57,257 DEBUG [RS:0;791f12959b23:39761 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:35:57,257 INFO [RS:0;791f12959b23:39761 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T09:35:57,257 INFO [RS:0;791f12959b23:39761 {}] hbase.HBaseServerBase(438): 
Shutdown chores and chore service 2024-11-15T09:35:57,257 INFO [RS:0;791f12959b23:39761 {}] hbase.ChoreService(370): Chore service for: regionserver/791f12959b23:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-15T09:35:57,258 INFO [RS:0;791f12959b23:39761 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T09:35:57,258 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T09:35:57,258 INFO [RS:0;791f12959b23:39761 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39761 2024-11-15T09:35:57,281 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/791f12959b23,39761,1731663260659 2024-11-15T09:35:57,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T09:35:57,281 INFO [RS:0;791f12959b23:39761 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T09:35:57,283 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [791f12959b23,39761,1731663260659] 2024-11-15T09:35:57,302 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/791f12959b23,39761,1731663260659 already deleted, retry=false 2024-11-15T09:35:57,302 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 791f12959b23,39761,1731663260659 expired; onlineServers=0 2024-11-15T09:35:57,302 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '791f12959b23,37481,1731663259873' ***** 2024-11-15T09:35:57,303 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T09:35:57,303 INFO [M:0;791f12959b23:37481 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T09:35:57,303 INFO [M:0;791f12959b23:37481 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T09:35:57,303 DEBUG [M:0;791f12959b23:37481 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T09:35:57,303 DEBUG [M:0;791f12959b23:37481 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T09:35:57,303 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T09:35:57,303 DEBUG [master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663262466 {}] cleaner.HFileCleaner(306): Exit Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663262466,5,FailOnTimeoutGroup] 2024-11-15T09:35:57,303 DEBUG [master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663262465 {}] cleaner.HFileCleaner(306): Exit Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663262465,5,FailOnTimeoutGroup] 2024-11-15T09:35:57,303 INFO [M:0;791f12959b23:37481 {}] hbase.ChoreService(370): Chore service for: master/791f12959b23:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T09:35:57,303 INFO [M:0;791f12959b23:37481 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T09:35:57,304 DEBUG [M:0;791f12959b23:37481 {}] master.HMaster(1795): Stopping service threads 2024-11-15T09:35:57,304 INFO [M:0;791f12959b23:37481 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T09:35:57,304 INFO [M:0;791f12959b23:37481 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T09:35:57,304 INFO [M:0;791f12959b23:37481 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T09:35:57,304 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T09:35:57,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T09:35:57,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:35:57,313 DEBUG [M:0;791f12959b23:37481 {}] zookeeper.ZKUtil(347): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T09:35:57,313 WARN [M:0;791f12959b23:37481 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T09:35:57,314 INFO [M:0;791f12959b23:37481 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/.lastflushedseqids 2024-11-15T09:35:57,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741854_1030 (size=130) 2024-11-15T09:35:57,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741854_1030 (size=130) 2024-11-15T09:35:57,326 INFO [M:0;791f12959b23:37481 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T09:35:57,326 INFO [M:0;791f12959b23:37481 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T09:35:57,326 DEBUG [M:0;791f12959b23:37481 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T09:35:57,326 INFO [M:0;791f12959b23:37481 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:35:57,326 DEBUG [M:0;791f12959b23:37481 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:35:57,326 DEBUG [M:0;791f12959b23:37481 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T09:35:57,326 DEBUG [M:0;791f12959b23:37481 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:35:57,326 INFO [M:0;791f12959b23:37481 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-15T09:35:57,343 DEBUG [M:0;791f12959b23:37481 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ea25b530b1e24080b8d9c4b37f444b23 is 82, key is hbase:meta,,1/info:regioninfo/1731663263548/Put/seqid=0 2024-11-15T09:35:57,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741855_1031 (size=5672) 2024-11-15T09:35:57,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741855_1031 (size=5672) 2024-11-15T09:35:57,350 INFO [M:0;791f12959b23:37481 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ea25b530b1e24080b8d9c4b37f444b23 2024-11-15T09:35:57,371 DEBUG [M:0;791f12959b23:37481 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/72ffed80b3ba46239f02a3aebb820290 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731663264782/Put/seqid=0 2024-11-15T09:35:57,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741856_1032 (size=6247) 2024-11-15T09:35:57,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741856_1032 (size=6247) 2024-11-15T09:35:57,377 INFO [M:0;791f12959b23:37481 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/72ffed80b3ba46239f02a3aebb820290 2024-11-15T09:35:57,384 INFO [M:0;791f12959b23:37481 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 72ffed80b3ba46239f02a3aebb820290 2024-11-15T09:35:57,392 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:35:57,392 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39761-0x1013dd8614b0001, quorum=127.0.0.1:56657, baseZNode=/hbase 
Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:35:57,393 INFO [RS:0;791f12959b23:39761 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T09:35:57,393 INFO [RS:0;791f12959b23:39761 {}] regionserver.HRegionServer(1031): Exiting; stopping=791f12959b23,39761,1731663260659; zookeeper connection closed. 2024-11-15T09:35:57,393 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@645c8260 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@645c8260 2024-11-15T09:35:57,394 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T09:35:57,400 DEBUG [M:0;791f12959b23:37481 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f134f7d7c12c40b99f44617c091aab03 is 69, key is 791f12959b23,39761,1731663260659/rs:state/1731663262526/Put/seqid=0 2024-11-15T09:35:57,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741857_1033 (size=5156) 2024-11-15T09:35:57,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741857_1033 (size=5156) 2024-11-15T09:35:57,406 INFO [M:0;791f12959b23:37481 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f134f7d7c12c40b99f44617c091aab03 2024-11-15T09:35:57,436 DEBUG [M:0;791f12959b23:37481 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/212a9e9b6065483cb1f0b858b331cfe1 is 52, key is load_balancer_on/state:d/1731663263844/Put/seqid=0 2024-11-15T09:35:57,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741858_1034 (size=5056) 2024-11-15T09:35:57,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741858_1034 (size=5056) 2024-11-15T09:35:57,846 INFO [M:0;791f12959b23:37481 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/212a9e9b6065483cb1f0b858b331cfe1 2024-11-15T09:35:57,861 DEBUG [M:0;791f12959b23:37481 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ea25b530b1e24080b8d9c4b37f444b23 as hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ea25b530b1e24080b8d9c4b37f444b23 2024-11-15T09:35:57,869 INFO [M:0;791f12959b23:37481 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ea25b530b1e24080b8d9c4b37f444b23, entries=8, sequenceid=59, filesize=5.5 K 2024-11-15T09:35:57,870 DEBUG [M:0;791f12959b23:37481 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/72ffed80b3ba46239f02a3aebb820290 as hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/72ffed80b3ba46239f02a3aebb820290 2024-11-15T09:35:57,876 INFO [M:0;791f12959b23:37481 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 72ffed80b3ba46239f02a3aebb820290 2024-11-15T09:35:57,876 INFO [M:0;791f12959b23:37481 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/72ffed80b3ba46239f02a3aebb820290, entries=6, sequenceid=59, filesize=6.1 K 2024-11-15T09:35:57,878 DEBUG [M:0;791f12959b23:37481 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f134f7d7c12c40b99f44617c091aab03 as hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f134f7d7c12c40b99f44617c091aab03 2024-11-15T09:35:57,884 INFO [M:0;791f12959b23:37481 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f134f7d7c12c40b99f44617c091aab03, entries=1, sequenceid=59, filesize=5.0 K 2024-11-15T09:35:57,885 DEBUG [M:0;791f12959b23:37481 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/212a9e9b6065483cb1f0b858b331cfe1 as hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/212a9e9b6065483cb1f0b858b331cfe1 2024-11-15T09:35:57,892 INFO [M:0;791f12959b23:37481 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/212a9e9b6065483cb1f0b858b331cfe1, entries=1, sequenceid=59, filesize=4.9 K 2024-11-15T09:35:57,893 INFO [M:0;791f12959b23:37481 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 567ms, sequenceid=59, compaction requested=false 2024-11-15T09:35:57,895 INFO [M:0;791f12959b23:37481 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T09:35:57,896 DEBUG [M:0;791f12959b23:37481 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731663357326Disabling compacts and flushes for region at 1731663357326Disabling writes for close at 1731663357326Obtaining lock to block concurrent updates at 1731663357326Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731663357326Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731663357327 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731663357328 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731663357328Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731663357343 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731663357343Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731663357356 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731663357370 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731663357370Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731663357384 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731663357399 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731663357399Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731663357413 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731663357435 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731663357435Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@13cc14c8: reopening flushed file at 1731663357860 (+425 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@639b73d3: reopening flushed file at 1731663357869 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@b9db9c4: reopening flushed file at 1731663357877 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4efa8fd0: reopening flushed file at 1731663357884 (+7 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 567ms, sequenceid=59, compaction requested=false at 1731663357893 (+9 ms)Writing region close event to WAL at 1731663357895 (+2 ms)Closed at 1731663357895 2024-11-15T09:35:57,897 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:57,897 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:57,897 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:57,897 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:57,897 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:35:57,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741830_1006 (size=27973) 2024-11-15T09:35:57,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33343 is added to blk_1073741830_1006 (size=27973) 2024-11-15T09:35:57,901 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T09:35:57,901 INFO [M:0;791f12959b23:37481 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-15T09:35:57,901 INFO [M:0;791f12959b23:37481 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37481 2024-11-15T09:35:57,901 INFO [M:0;791f12959b23:37481 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T09:35:58,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:35:58,051 INFO [M:0;791f12959b23:37481 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T09:35:58,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37481-0x1013dd8614b0000, quorum=127.0.0.1:56657, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:35:58,056 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:35:58,058 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:35:58,058 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:35:58,058 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:35:58,058 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/hadoop.log.dir/,STOPPED} 2024-11-15T09:35:58,061 WARN [BP-1421409377-172.17.0.2-1731663255408 heartbeating to localhost/127.0.0.1:35471 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:35:58,061 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T09:35:58,061 WARN [BP-1421409377-172.17.0.2-1731663255408 heartbeating to localhost/127.0.0.1:35471 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1421409377-172.17.0.2-1731663255408 (Datanode Uuid 4e0e27a7-9048-4e32-970c-0f5f293f638d) service to localhost/127.0.0.1:35471 2024-11-15T09:35:58,061 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:35:58,062 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/cluster_b837c6ed-002c-851e-d2c4-71797032264d/data/data3/current/BP-1421409377-172.17.0.2-1731663255408 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:35:58,062 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/cluster_b837c6ed-002c-851e-d2c4-71797032264d/data/data4/current/BP-1421409377-172.17.0.2-1731663255408 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:35:58,063 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:35:58,065 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:35:58,065 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:35:58,065 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:35:58,065 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:35:58,065 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/hadoop.log.dir/,STOPPED} 2024-11-15T09:35:58,067 WARN [BP-1421409377-172.17.0.2-1731663255408 heartbeating to localhost/127.0.0.1:35471 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:35:58,067 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T09:35:58,067 WARN [BP-1421409377-172.17.0.2-1731663255408 heartbeating to localhost/127.0.0.1:35471 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1421409377-172.17.0.2-1731663255408 (Datanode Uuid 96165d34-4806-4140-bf21-9f241bc57699) service to localhost/127.0.0.1:35471 2024-11-15T09:35:58,067 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:35:58,068 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/cluster_b837c6ed-002c-851e-d2c4-71797032264d/data/data1/current/BP-1421409377-172.17.0.2-1731663255408 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:35:58,068 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/cluster_b837c6ed-002c-851e-d2c4-71797032264d/data/data2/current/BP-1421409377-172.17.0.2-1731663255408 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:35:58,068 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:35:58,080 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T09:35:58,081 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:35:58,081 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:35:58,081 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:35:58,082 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/hadoop.log.dir/,STOPPED} 2024-11-15T09:35:58,090 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T09:35:58,122 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T09:35:58,131 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=78 (was 12) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35471 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: LeaseRenewer:jenkins@localhost:35471 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35471 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/791f12959b23:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/791f12959b23:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35471 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35471 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35471 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: regionserver/791f12959b23:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:35471 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@6f391bc8 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35471 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=150 (was 208), ProcessCount=11 (was 11), AvailableMemoryMB=4319 (was 4658) 2024-11-15T09:35:58,137 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=79, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=150, ProcessCount=11, AvailableMemoryMB=4319 2024-11-15T09:35:58,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T09:35:58,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/hadoop.log.dir so I do NOT create it in target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f 2024-11-15T09:35:58,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33c25e53-2591-fe2c-6c27-61e397b8bb4e/hadoop.tmp.dir so I do NOT create it in target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f 2024-11-15T09:35:58,138 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/cluster_94079e49-e2a7-7639-ada5-2d9cce0ca436, deleteOnExit=true 2024-11-15T09:35:58,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T09:35:58,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/test.cache.data in system properties and HBase conf 2024-11-15T09:35:58,138 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T09:35:58,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/hadoop.log.dir in system properties and HBase conf 2024-11-15T09:35:58,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T09:35:58,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T09:35:58,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T09:35:58,139 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-15T09:35:58,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T09:35:58,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T09:35:58,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T09:35:58,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T09:35:58,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T09:35:58,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T09:35:58,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T09:35:58,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T09:35:58,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T09:35:58,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/nfs.dump.dir in system properties and HBase conf 2024-11-15T09:35:58,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/java.io.tmpdir in system properties and HBase conf 2024-11-15T09:35:58,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T09:35:58,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T09:35:58,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T09:35:58,154 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T09:35:58,505 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:35:58,512 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:35:58,513 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:35:58,513 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:35:58,514 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T09:35:58,514 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:35:58,515 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54b8bf96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:35:58,515 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@737d6115{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:35:58,610 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55cb1221{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/java.io.tmpdir/jetty-localhost-41321-hadoop-hdfs-3_4_1-tests_jar-_-any-2875643559850835087/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T09:35:58,610 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@542ee468{HTTP/1.1, (http/1.1)}{localhost:41321} 2024-11-15T09:35:58,610 INFO [Time-limited test {}] server.Server(415): Started @105044ms 2024-11-15T09:35:58,623 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T09:35:58,909 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:35:58,913 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:35:58,914 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:35:58,914 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:35:58,914 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T09:35:58,915 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61783b0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:35:58,915 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b58749b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:35:59,013 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4595827f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/java.io.tmpdir/jetty-localhost-38047-hadoop-hdfs-3_4_1-tests_jar-_-any-690108406773539210/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:35:59,013 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3b01355c{HTTP/1.1, (http/1.1)}{localhost:38047} 2024-11-15T09:35:59,013 INFO [Time-limited test {}] server.Server(415): Started @105447ms 2024-11-15T09:35:59,015 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:35:59,049 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:35:59,053 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:35:59,054 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:35:59,054 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:35:59,054 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T09:35:59,054 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6082dc4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:35:59,055 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a742c1f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:35:59,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@da5059a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/java.io.tmpdir/jetty-localhost-44377-hadoop-hdfs-3_4_1-tests_jar-_-any-16213141999913390277/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:35:59,150 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2220be00{HTTP/1.1, (http/1.1)}{localhost:44377} 2024-11-15T09:35:59,150 INFO [Time-limited test {}] server.Server(415): Started @105583ms 2024-11-15T09:35:59,152 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:36:00,086 WARN [Thread-450 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/cluster_94079e49-e2a7-7639-ada5-2d9cce0ca436/data/data1/current/BP-1750370677-172.17.0.2-1731663358165/current, will proceed with Du for space computation calculation, 2024-11-15T09:36:00,086 WARN [Thread-451 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/cluster_94079e49-e2a7-7639-ada5-2d9cce0ca436/data/data2/current/BP-1750370677-172.17.0.2-1731663358165/current, will proceed with Du for space computation calculation, 2024-11-15T09:36:00,106 WARN [Thread-414 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T09:36:00,108 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x59a60b65fdd0ac36 with lease ID 0xf7a35de2218b18f4: Processing first storage report for DS-86797a5f-430a-4314-afef-fbc3493f9066 from datanode DatanodeRegistration(127.0.0.1:40389, datanodeUuid=df63318d-1178-4596-b2f1-c4a630020a76, infoPort=40253, infoSecurePort=0, ipcPort=37893, storageInfo=lv=-57;cid=testClusterID;nsid=1872235902;c=1731663358165) 2024-11-15T09:36:00,108 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x59a60b65fdd0ac36 with lease ID 0xf7a35de2218b18f4: from storage DS-86797a5f-430a-4314-afef-fbc3493f9066 node DatanodeRegistration(127.0.0.1:40389, datanodeUuid=df63318d-1178-4596-b2f1-c4a630020a76, infoPort=40253, infoSecurePort=0, ipcPort=37893, storageInfo=lv=-57;cid=testClusterID;nsid=1872235902;c=1731663358165), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:36:00,108 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x59a60b65fdd0ac36 with lease ID 0xf7a35de2218b18f4: Processing first storage report for DS-0a516fba-099e-48ed-b854-48edfe8e0622 from datanode DatanodeRegistration(127.0.0.1:40389, datanodeUuid=df63318d-1178-4596-b2f1-c4a630020a76, infoPort=40253, infoSecurePort=0, ipcPort=37893, storageInfo=lv=-57;cid=testClusterID;nsid=1872235902;c=1731663358165) 2024-11-15T09:36:00,108 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x59a60b65fdd0ac36 with lease ID 0xf7a35de2218b18f4: from storage DS-0a516fba-099e-48ed-b854-48edfe8e0622 node DatanodeRegistration(127.0.0.1:40389, datanodeUuid=df63318d-1178-4596-b2f1-c4a630020a76, infoPort=40253, infoSecurePort=0, ipcPort=37893, storageInfo=lv=-57;cid=testClusterID;nsid=1872235902;c=1731663358165), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:36:00,227 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T09:36:00,227 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T09:36:00,229 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T09:36:00,229 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-15T09:36:00,300 WARN [Thread-461 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/cluster_94079e49-e2a7-7639-ada5-2d9cce0ca436/data/data3/current/BP-1750370677-172.17.0.2-1731663358165/current, will proceed with Du for space computation calculation, 2024-11-15T09:36:00,300 WARN [Thread-462 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/cluster_94079e49-e2a7-7639-ada5-2d9cce0ca436/data/data4/current/BP-1750370677-172.17.0.2-1731663358165/current, will proceed with Du for space computation calculation, 2024-11-15T09:36:00,319 WARN [Thread-437 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T09:36:00,322 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe860fcb81a8e7ca6 with lease ID 0xf7a35de2218b18f5: Processing first storage report for DS-33773101-8482-4c64-b467-aba3a0eec2b8 from datanode DatanodeRegistration(127.0.0.1:36901, datanodeUuid=d2e6ca27-3222-4fd1-aae1-6e20996668bd, infoPort=40115, infoSecurePort=0, ipcPort=35003, storageInfo=lv=-57;cid=testClusterID;nsid=1872235902;c=1731663358165) 2024-11-15T09:36:00,322 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe860fcb81a8e7ca6 with lease ID 0xf7a35de2218b18f5: from storage DS-33773101-8482-4c64-b467-aba3a0eec2b8 node DatanodeRegistration(127.0.0.1:36901, datanodeUuid=d2e6ca27-3222-4fd1-aae1-6e20996668bd, infoPort=40115, infoSecurePort=0, ipcPort=35003, storageInfo=lv=-57;cid=testClusterID;nsid=1872235902;c=1731663358165), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T09:36:00,322 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe860fcb81a8e7ca6 with lease ID 0xf7a35de2218b18f5: Processing first storage report for DS-03b89cf7-9683-4f78-b915-1885b01b77fc from datanode DatanodeRegistration(127.0.0.1:36901, datanodeUuid=d2e6ca27-3222-4fd1-aae1-6e20996668bd, infoPort=40115, infoSecurePort=0, ipcPort=35003, storageInfo=lv=-57;cid=testClusterID;nsid=1872235902;c=1731663358165) 2024-11-15T09:36:00,322 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe860fcb81a8e7ca6 with lease ID 0xf7a35de2218b18f5: from storage DS-03b89cf7-9683-4f78-b915-1885b01b77fc node DatanodeRegistration(127.0.0.1:36901, datanodeUuid=d2e6ca27-3222-4fd1-aae1-6e20996668bd, infoPort=40115, infoSecurePort=0, ipcPort=35003, storageInfo=lv=-57;cid=testClusterID;nsid=1872235902;c=1731663358165), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:36:00,394 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f 2024-11-15T09:36:00,398 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/cluster_94079e49-e2a7-7639-ada5-2d9cce0ca436/zookeeper_0, clientPort=49586, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/cluster_94079e49-e2a7-7639-ada5-2d9cce0ca436/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/cluster_94079e49-e2a7-7639-ada5-2d9cce0ca436/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T09:36:00,399 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49586 2024-11-15T09:36:00,399 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:00,401 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:00,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36901 is added to blk_1073741825_1001 (size=7) 2024-11-15T09:36:00,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40389 is added to blk_1073741825_1001 (size=7) 2024-11-15T09:36:00,413 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4 with version=8 2024-11-15T09:36:00,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/hbase-staging 2024-11-15T09:36:00,416 INFO [Time-limited test {}] client.ConnectionUtils(128): master/791f12959b23:0 server-side Connection retries=45 2024-11-15T09:36:00,416 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:00,416 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:00,416 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T09:36:00,416 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:00,417 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T09:36:00,417 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T09:36:00,417 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T09:36:00,418 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35331 2024-11-15T09:36:00,420 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35331 connecting to ZooKeeper ensemble=127.0.0.1:49586 2024-11-15T09:36:00,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:353310x0, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-15T09:36:00,476 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35331-0x1013dd9ed380000 connected 2024-11-15T09:36:00,577 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:00,583 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:00,586 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:36:00,586 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4, hbase.cluster.distributed=false 2024-11-15T09:36:00,588 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T09:36:00,588 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35331 2024-11-15T09:36:00,588 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35331 2024-11-15T09:36:00,589 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35331 2024-11-15T09:36:00,589 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35331 2024-11-15T09:36:00,589 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35331 2024-11-15T09:36:00,607 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/791f12959b23:0 server-side Connection retries=45 2024-11-15T09:36:00,607 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:00,607 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:00,608 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T09:36:00,608 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:00,608 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T09:36:00,608 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T09:36:00,608 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T09:36:00,609 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37769 2024-11-15T09:36:00,610 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37769 connecting to ZooKeeper ensemble=127.0.0.1:49586 2024-11-15T09:36:00,611 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:00,613 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:00,628 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:377690x0, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T09:36:00,629 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37769-0x1013dd9ed380001 connected 2024-11-15T09:36:00,629 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:36:00,629 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T09:36:00,632 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T09:36:00,633 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T09:36:00,635 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T09:36:00,639 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37769 2024-11-15T09:36:00,639 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37769 2024-11-15T09:36:00,640 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37769 2024-11-15T09:36:00,641 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37769 2024-11-15T09:36:00,641 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37769 2024-11-15T09:36:00,654 DEBUG [M:0;791f12959b23:35331 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;791f12959b23:35331 2024-11-15T09:36:00,655 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/791f12959b23,35331,1731663360416 2024-11-15T09:36:00,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:36:00,667 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:36:00,667 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/791f12959b23,35331,1731663360416 2024-11-15T09:36:00,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:00,677 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T09:36:00,677 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:00,678 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T09:36:00,678 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/791f12959b23,35331,1731663360416 from backup master directory 2024-11-15T09:36:00,688 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:36:00,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/791f12959b23,35331,1731663360416 2024-11-15T09:36:00,688 WARN [master/791f12959b23:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
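Annotation: the entries just above and below trace the master election handshake on ZooKeeper: the process first registers an ephemeral znode under /hbase/backup-masters, then claims /hbase/master, and finally deletes its backup-masters entry once it wins. The real logic lives in HBase's ActiveMasterManager; the following is only a minimal sketch of that dance written against the plain ZooKeeper client, with the ensemble address, znode paths, and server name copied from this log and everything else illustrative (parent znodes and connection handling are assumed to be in place).

import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class MasterElectionSketch {
  public static void main(String[] args) throws Exception {
    String ensemble = "127.0.0.1:49586";                     // ensemble from the log
    String serverName = "791f12959b23,35331,1731663360416";  // server name from the log
    byte[] data = serverName.getBytes(StandardCharsets.UTF_8);

    ZooKeeper zk = new ZooKeeper(ensemble, 30000, (WatchedEvent e) -> { /* watcher stub */ });

    // 1. Announce ourselves under /hbase/backup-masters first.
    String backupZnode = "/hbase/backup-masters/" + serverName;
    zk.create(backupZnode, data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // 2. Try to become active by creating the ephemeral /hbase/master znode.
    try {
      zk.create("/hbase/master", data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      // Won the race: drop our backup-masters entry, as the surrounding entries show.
      zk.delete(backupZnode, -1);
      System.out.println("Registered as active master=" + serverName);
    } catch (KeeperException.NodeExistsException alreadyTaken) {
      // Lost the race: stay a backup master and watch /hbase/master for deletion.
      zk.exists("/hbase/master", true);
      System.out.println("Another master is active; waiting as backup");
    }
    zk.close();
  }
}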
2024-11-15T09:36:00,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:36:00,688 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=791f12959b23,35331,1731663360416 2024-11-15T09:36:00,693 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/hbase.id] with ID: 9bc65410-eae2-44bd-82b8-112288eb1e71 2024-11-15T09:36:00,693 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/.tmp/hbase.id 2024-11-15T09:36:00,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36901 is added to blk_1073741826_1002 (size=42) 2024-11-15T09:36:00,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40389 is added to blk_1073741826_1002 (size=42) 2024-11-15T09:36:00,705 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/.tmp/hbase.id]:[hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/hbase.id] 2024-11-15T09:36:00,722 INFO [master/791f12959b23:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:00,722 INFO [master/791f12959b23:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T09:36:00,724 INFO [master/791f12959b23:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
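Annotation: the FSUtils entries above show the cluster ID (9bc65410-eae2-44bd-82b8-112288eb1e71) being written to .tmp/hbase.id and then moved to hbase.id, i.e. the usual write-to-temp-then-rename pattern so readers never observe a partially written file. A minimal sketch of that pattern with the Hadoop FileSystem API follows; the NameNode address, paths, and ID are copied from the log, and the class name is purely illustrative.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:33511"); // NameNode from the log
    FileSystem fs = FileSystem.get(conf);

    Path rootDir = new Path("/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4");
    Path tmpId = new Path(rootDir, ".tmp/hbase.id");
    Path finalId = new Path(rootDir, "hbase.id");

    // Write the ID to a temporary file first so readers never see a partial file.
    try (FSDataOutputStream out = fs.create(tmpId, true)) {
      out.write("9bc65410-eae2-44bd-82b8-112288eb1e71".getBytes(StandardCharsets.UTF_8));
    }
    // A rename within one HDFS namespace is a single metadata operation, which publishes the file.
    if (!fs.rename(tmpId, finalId)) {
      throw new IllegalStateException("Could not move " + tmpId + " to " + finalId);
    }
  }
}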
2024-11-15T09:36:00,734 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:00,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:00,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36901 is added to blk_1073741827_1003 (size=196) 2024-11-15T09:36:00,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40389 is added to blk_1073741827_1003 (size=196) 2024-11-15T09:36:00,742 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T09:36:00,743 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T09:36:00,743 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:36:00,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40389 is added to blk_1073741828_1004 (size=1189) 2024-11-15T09:36:00,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36901 is added to blk_1073741828_1004 (size=1189) 2024-11-15T09:36:00,753 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store 2024-11-15T09:36:00,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40389 is added to blk_1073741829_1005 (size=34) 2024-11-15T09:36:00,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36901 is added to blk_1073741829_1005 (size=34) 2024-11-15T09:36:00,762 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:36:00,762 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T09:36:00,762 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:36:00,762 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:36:00,762 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T09:36:00,762 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:36:00,762 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
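Annotation: the master:store descriptor logged above spells out per-family settings (VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING, BLOCKSIZE). As a rough illustration of what those attributes correspond to in the public HBase client API, here is a hedged sketch that assembles an equivalent descriptor with TableDescriptorBuilder and ColumnFamilyDescriptorBuilder; only the 'info' and 'proc' families are shown, the values are copied from the log lines, and the class name is illustrative.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)                                    // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)                    // BLOOMFILTER => 'ROW'
            .setBlocksize(64 * 1024)                              // BLOCKSIZE => '65536 B (64KB)'
            .build())
        .build();
  }
}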
2024-11-15T09:36:00,762 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731663360762Disabling compacts and flushes for region at 1731663360762Disabling writes for close at 1731663360762Writing region close event to WAL at 1731663360762Closed at 1731663360762 2024-11-15T09:36:00,763 WARN [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/.initializing 2024-11-15T09:36:00,763 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/WALs/791f12959b23,35331,1731663360416 2024-11-15T09:36:00,767 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C35331%2C1731663360416, suffix=, logDir=hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/WALs/791f12959b23,35331,1731663360416, archiveDir=hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/oldWALs, maxLogs=10 2024-11-15T09:36:00,767 INFO [master/791f12959b23:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C35331%2C1731663360416.1731663360767 2024-11-15T09:36:00,773 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/WALs/791f12959b23,35331,1731663360416/791f12959b23%2C35331%2C1731663360416.1731663360767 2024-11-15T09:36:00,774 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40253:40253),(127.0.0.1/127.0.0.1:40115:40115)] 2024-11-15T09:36:00,775 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T09:36:00,775 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:36:00,775 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:00,775 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:00,780 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:00,782 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T09:36:00,782 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:00,783 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:00,783 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:00,785 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T09:36:00,785 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:00,786 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:36:00,786 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:00,789 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T09:36:00,789 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:00,790 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:36:00,790 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:00,792 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T09:36:00,792 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:00,793 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:36:00,793 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:00,794 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:00,794 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:00,796 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:00,796 DEBUG [master/791f12959b23:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:00,797 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T09:36:00,798 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:00,800 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T09:36:00,801 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=862948, jitterRate=0.09729531407356262}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T09:36:00,802 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731663360775Initializing all the Stores at 1731663360777 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663360777Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663360780 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663360780Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663360780Cleaning up temporary data from old regions at 1731663360796 (+16 ms)Region opened successfully at 1731663360802 (+6 ms) 2024-11-15T09:36:00,803 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T09:36:00,807 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@235ea61, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=791f12959b23/172.17.0.2:0 2024-11-15T09:36:00,808 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T09:36:00,808 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T09:36:00,808 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T09:36:00,808 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T09:36:00,809 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T09:36:00,809 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T09:36:00,809 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T09:36:00,812 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T09:36:00,813 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T09:36:00,824 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T09:36:00,825 INFO [master/791f12959b23:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T09:36:00,826 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T09:36:00,835 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T09:36:00,836 INFO [master/791f12959b23:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T09:36:00,837 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T09:36:00,849 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T09:36:00,850 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T09:36:00,860 DEBUG 
[master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T09:36:00,863 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T09:36:00,870 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T09:36:00,881 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T09:36:00,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T09:36:00,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:00,881 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:00,882 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=791f12959b23,35331,1731663360416, sessionid=0x1013dd9ed380000, setting cluster-up flag (Was=false) 2024-11-15T09:36:00,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:00,902 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:00,934 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T09:36:00,939 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=791f12959b23,35331,1731663360416 2024-11-15T09:36:00,966 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:00,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:00,997 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T09:36:01,001 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=791f12959b23,35331,1731663360416 2024-11-15T09:36:01,003 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T09:36:01,005 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T09:36:01,005 INFO [master/791f12959b23:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T09:36:01,005 INFO [master/791f12959b23:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T09:36:01,005 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 791f12959b23,35331,1731663360416 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T09:36:01,007 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:36:01,007 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:36:01,007 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:36:01,008 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:36:01,008 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/791f12959b23:0, corePoolSize=10, maxPoolSize=10 2024-11-15T09:36:01,008 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:01,008 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/791f12959b23:0, corePoolSize=2, maxPoolSize=2 2024-11-15T09:36:01,008 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/791f12959b23:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T09:36:01,009 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731663391009 2024-11-15T09:36:01,009 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T09:36:01,009 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T09:36:01,009 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T09:36:01,009 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T09:36:01,009 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T09:36:01,009 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T09:36:01,010 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:01,010 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T09:36:01,010 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T09:36:01,010 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T09:36:01,010 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:36:01,010 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T09:36:01,010 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T09:36:01,011 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T09:36:01,011 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663361011,5,FailOnTimeoutGroup] 2024-11-15T09:36:01,011 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663361011,5,FailOnTimeoutGroup] 2024-11-15T09:36:01,011 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:01,011 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T09:36:01,011 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:01,011 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:01,012 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:01,013 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T09:36:01,045 INFO [RS:0;791f12959b23:37769 {}] regionserver.HRegionServer(746): ClusterId : 9bc65410-eae2-44bd-82b8-112288eb1e71 2024-11-15T09:36:01,045 DEBUG [RS:0;791f12959b23:37769 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T09:36:01,227 DEBUG [RS:0;791f12959b23:37769 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T09:36:01,228 DEBUG [RS:0;791f12959b23:37769 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T09:36:01,353 DEBUG [RS:0;791f12959b23:37769 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T09:36:01,354 DEBUG [RS:0;791f12959b23:37769 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cdc4eea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=791f12959b23/172.17.0.2:0 2024-11-15T09:36:01,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40389 is added to 
blk_1073741831_1007 (size=1321) 2024-11-15T09:36:01,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36901 is added to blk_1073741831_1007 (size=1321) 2024-11-15T09:36:01,360 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T09:36:01,360 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4 2024-11-15T09:36:01,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40389 is added to blk_1073741832_1008 (size=32) 2024-11-15T09:36:01,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36901 is added to blk_1073741832_1008 (size=32) 2024-11-15T09:36:01,371 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:36:01,372 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T09:36:01,374 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered 
compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T09:36:01,374 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:01,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:01,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T09:36:01,375 DEBUG [RS:0;791f12959b23:37769 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;791f12959b23:37769 2024-11-15T09:36:01,375 INFO [RS:0;791f12959b23:37769 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T09:36:01,376 INFO [RS:0;791f12959b23:37769 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T09:36:01,376 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T09:36:01,376 DEBUG [RS:0;791f12959b23:37769 {}] regionserver.HRegionServer(832): About to register with Master. 
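The descriptor entries above spell out the column-family attributes used for hbase:meta (ROWCOL bloom filters, ROW_INDEX_V1 block encoding, in-memory caching, 8 KB blocks). For reference only, a minimal sketch of expressing the same attributes for the 'info' family through the public client API; the class and setter names assume the standard ColumnFamilyDescriptorBuilder of HBase 2.x/3.x, and the attribute values are copied from the log, not recommendations.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaInfoFamilySketch {
  public static void main(String[] args) {
    // Attributes taken from the hbase:meta table descriptor printed in the log above.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                      // VERSIONS => '3'
        .setBloomFilterType(BloomType.ROWCOL)                   // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)   // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .setInMemory(true)                                      // IN_MEMORY => 'true'
        .setBlocksize(8192)                                     // BLOCKSIZE => '8192 B (8KB)'
        .build();
    System.out.println(info);
  }
}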
2024-11-15T09:36:01,376 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:01,376 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:01,376 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T09:36:01,376 INFO [RS:0;791f12959b23:37769 {}] regionserver.HRegionServer(2659): reportForDuty to master=791f12959b23,35331,1731663360416 with port=37769, startcode=1731663360607 2024-11-15T09:36:01,377 DEBUG [RS:0;791f12959b23:37769 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T09:36:01,378 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T09:36:01,378 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:01,379 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:01,379 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T09:36:01,379 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52851, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T09:36:01,380 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35331 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 791f12959b23,37769,1731663360607 2024-11-15T09:36:01,380 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35331 {}] master.ServerManager(517): Registering regionserver=791f12959b23,37769,1731663360607 2024-11-15T09:36:01,380 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T09:36:01,381 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:01,382 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:01,382 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T09:36:01,382 DEBUG [RS:0;791f12959b23:37769 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4 2024-11-15T09:36:01,382 DEBUG [RS:0;791f12959b23:37769 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33511 2024-11-15T09:36:01,382 DEBUG [RS:0;791f12959b23:37769 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T09:36:01,382 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/data/hbase/meta/1588230740 2024-11-15T09:36:01,383 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/data/hbase/meta/1588230740 2024-11-15T09:36:01,384 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T09:36:01,384 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T09:36:01,385 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
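The "Config from master" entries above list the three settings the region server receives at registration (hbase.rootdir, fs.defaultFS, hbase.master.info.port), and the FlushLargeStoresPolicy message notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is unset, so the flush lower bound falls back to memstore flush size divided by the number of families. A minimal sketch of those keys on a client-side Configuration; the HDFS URI and port are the ephemeral values of this particular test run, not values to copy.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MasterHandedConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The same three keys the master hands to the region server in the log above.
    conf.set("hbase.rootdir",
        "hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4");
    conf.set("fs.defaultFS", "hdfs://localhost:33511");
    conf.setInt("hbase.master.info.port", -1); // -1 disables the master info web UI
    System.out.println(conf.get("hbase.rootdir"));
  }
}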
2024-11-15T09:36:01,386 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T09:36:01,388 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T09:36:01,389 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=701837, jitterRate=-0.10756903886795044}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T09:36:01,390 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731663361371Initializing all the Stores at 1731663361371Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663361372 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663361372Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663361372Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663361372Cleaning up temporary data from old regions at 1731663361384 (+12 ms)Region opened successfully at 1731663361390 (+6 ms) 2024-11-15T09:36:01,390 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T09:36:01,390 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T09:36:01,390 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T09:36:01,390 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T09:36:01,390 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T09:36:01,391 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T09:36:01,391 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731663361390Disabling compacts and flushes for region at 1731663361390Disabling writes for close at 1731663361390Writing region 
close event to WAL at 1731663361390Closed at 1731663361391 (+1 ms) 2024-11-15T09:36:01,392 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:36:01,392 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T09:36:01,392 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T09:36:01,394 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T09:36:01,395 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T09:36:01,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T09:36:01,494 DEBUG [RS:0;791f12959b23:37769 {}] zookeeper.ZKUtil(111): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/791f12959b23,37769,1731663360607 2024-11-15T09:36:01,494 WARN [RS:0;791f12959b23:37769 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T09:36:01,494 INFO [RS:0;791f12959b23:37769 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:36:01,494 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [791f12959b23,37769,1731663360607] 2024-11-15T09:36:01,494 DEBUG [RS:0;791f12959b23:37769 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/WALs/791f12959b23,37769,1731663360607 2024-11-15T09:36:01,500 INFO [RS:0;791f12959b23:37769 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T09:36:01,505 INFO [RS:0;791f12959b23:37769 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T09:36:01,506 INFO [RS:0;791f12959b23:37769 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T09:36:01,506 INFO [RS:0;791f12959b23:37769 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-15T09:36:01,506 INFO [RS:0;791f12959b23:37769 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T09:36:01,507 INFO [RS:0;791f12959b23:37769 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T09:36:01,507 INFO [RS:0;791f12959b23:37769 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:01,507 DEBUG [RS:0;791f12959b23:37769 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:01,507 DEBUG [RS:0;791f12959b23:37769 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:01,507 DEBUG [RS:0;791f12959b23:37769 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:01,507 DEBUG [RS:0;791f12959b23:37769 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:01,508 DEBUG [RS:0;791f12959b23:37769 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:01,508 DEBUG [RS:0;791f12959b23:37769 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/791f12959b23:0, corePoolSize=2, maxPoolSize=2 2024-11-15T09:36:01,508 DEBUG [RS:0;791f12959b23:37769 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:01,508 DEBUG [RS:0;791f12959b23:37769 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:01,508 DEBUG [RS:0;791f12959b23:37769 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:01,508 DEBUG [RS:0;791f12959b23:37769 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:01,508 DEBUG [RS:0;791f12959b23:37769 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:01,508 DEBUG [RS:0;791f12959b23:37769 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:01,508 DEBUG [RS:0;791f12959b23:37769 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/791f12959b23:0, corePoolSize=3, maxPoolSize=3 2024-11-15T09:36:01,508 DEBUG [RS:0;791f12959b23:37769 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0, corePoolSize=3, maxPoolSize=3 2024-11-15T09:36:01,509 INFO [RS:0;791f12959b23:37769 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-15T09:36:01,509 INFO [RS:0;791f12959b23:37769 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:01,509 INFO [RS:0;791f12959b23:37769 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:01,509 INFO [RS:0;791f12959b23:37769 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:01,509 INFO [RS:0;791f12959b23:37769 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:01,509 INFO [RS:0;791f12959b23:37769 {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,37769,1731663360607-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T09:36:01,524 INFO [RS:0;791f12959b23:37769 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T09:36:01,524 INFO [RS:0;791f12959b23:37769 {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,37769,1731663360607-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:01,524 INFO [RS:0;791f12959b23:37769 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:01,525 INFO [RS:0;791f12959b23:37769 {}] regionserver.Replication(171): 791f12959b23,37769,1731663360607 started 2024-11-15T09:36:01,538 INFO [RS:0;791f12959b23:37769 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:01,538 INFO [RS:0;791f12959b23:37769 {}] regionserver.HRegionServer(1482): Serving as 791f12959b23,37769,1731663360607, RpcServer on 791f12959b23/172.17.0.2:37769, sessionid=0x1013dd9ed380001 2024-11-15T09:36:01,538 DEBUG [RS:0;791f12959b23:37769 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T09:36:01,538 DEBUG [RS:0;791f12959b23:37769 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 791f12959b23,37769,1731663360607 2024-11-15T09:36:01,538 DEBUG [RS:0;791f12959b23:37769 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '791f12959b23,37769,1731663360607' 2024-11-15T09:36:01,538 DEBUG [RS:0;791f12959b23:37769 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T09:36:01,539 DEBUG [RS:0;791f12959b23:37769 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T09:36:01,539 DEBUG [RS:0;791f12959b23:37769 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T09:36:01,539 DEBUG [RS:0;791f12959b23:37769 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T09:36:01,539 DEBUG [RS:0;791f12959b23:37769 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 791f12959b23,37769,1731663360607 2024-11-15T09:36:01,539 DEBUG [RS:0;791f12959b23:37769 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '791f12959b23,37769,1731663360607' 2024-11-15T09:36:01,539 DEBUG [RS:0;791f12959b23:37769 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T09:36:01,540 DEBUG 
[RS:0;791f12959b23:37769 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T09:36:01,540 DEBUG [RS:0;791f12959b23:37769 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T09:36:01,540 INFO [RS:0;791f12959b23:37769 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T09:36:01,541 INFO [RS:0;791f12959b23:37769 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T09:36:01,546 WARN [791f12959b23:35331 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-15T09:36:01,646 INFO [RS:0;791f12959b23:37769 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C37769%2C1731663360607, suffix=, logDir=hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/WALs/791f12959b23,37769,1731663360607, archiveDir=hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/oldWALs, maxLogs=32 2024-11-15T09:36:01,653 INFO [RS:0;791f12959b23:37769 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C37769%2C1731663360607.1731663361652 2024-11-15T09:36:01,661 INFO [RS:0;791f12959b23:37769 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/WALs/791f12959b23,37769,1731663360607/791f12959b23%2C37769%2C1731663360607.1731663361652 2024-11-15T09:36:01,662 DEBUG [RS:0;791f12959b23:37769 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40253:40253),(127.0.0.1/127.0.0.1:40115:40115)] 2024-11-15T09:36:01,713 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:01,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:01,796 DEBUG [791f12959b23:35331 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T09:36:01,797 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=791f12959b23,37769,1731663360607 2024-11-15T09:36:01,800 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 791f12959b23,37769,1731663360607, state=OPENING 2024-11-15T09:36:01,945 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T09:36:01,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:01,955 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:01,956 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T09:36:01,956 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:36:01,956 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:36:01,956 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=791f12959b23,37769,1731663360607}] 2024-11-15T09:36:02,112 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T09:36:02,119 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56555, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T09:36:02,124 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T09:36:02,124 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:36:02,126 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C37769%2C1731663360607.meta, suffix=.meta, logDir=hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/WALs/791f12959b23,37769,1731663360607, archiveDir=hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/oldWALs, maxLogs=32 2024-11-15T09:36:02,129 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 
791f12959b23%2C37769%2C1731663360607.meta.1731663362129.meta 2024-11-15T09:36:02,135 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/WALs/791f12959b23,37769,1731663360607/791f12959b23%2C37769%2C1731663360607.meta.1731663362129.meta 2024-11-15T09:36:02,136 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40115:40115),(127.0.0.1/127.0.0.1:40253:40253)] 2024-11-15T09:36:02,137 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T09:36:02,137 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T09:36:02,137 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T09:36:02,137 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-15T09:36:02,137 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T09:36:02,137 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:36:02,137 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T09:36:02,138 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T09:36:02,139 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T09:36:02,140 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 
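The assignment above ends with hbase:meta being opened on the lone region server and its location published for clients. For illustration, a small sketch of how a client could look that location up through the public API, assuming an hbase-site.xml that points at the running cluster; the classes used (Connection, RegionLocator, TableName.META_TABLE_NAME) are standard HBase client API.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml points at the cluster
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // hbase:meta is a single region, so the location of the empty start key is its server.
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println("hbase:meta is on " + loc.getServerName());
    }
  }
}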
2024-11-15T09:36:02,140 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:02,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:02,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T09:36:02,142 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T09:36:02,142 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:02,142 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:02,142 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T09:36:02,143 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T09:36:02,143 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:02,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:02,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T09:36:02,145 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T09:36:02,145 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:02,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:02,146 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T09:36:02,147 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/data/hbase/meta/1588230740 2024-11-15T09:36:02,149 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/data/hbase/meta/1588230740 2024-11-15T09:36:02,150 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T09:36:02,150 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T09:36:02,151 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-15T09:36:02,153 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T09:36:02,154 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=712974, jitterRate=-0.09340691566467285}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T09:36:02,155 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T09:36:02,156 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731663362138Writing region info on filesystem at 1731663362138Initializing all the Stores at 1731663362139 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663362139Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663362139Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663362139Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663362139Cleaning up temporary data from old regions at 1731663362151 (+12 ms)Running coprocessor post-open hooks at 1731663362155 (+4 ms)Region opened successfully at 1731663362156 (+1 ms) 2024-11-15T09:36:02,157 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731663362111 2024-11-15T09:36:02,160 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T09:36:02,160 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T09:36:02,161 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=791f12959b23,37769,1731663360607 2024-11-15T09:36:02,163 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 791f12959b23,37769,1731663360607, state=OPEN 2024-11-15T09:36:02,234 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T09:36:02,237 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:02,256 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:02,292 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T09:36:02,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T09:36:02,292 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=791f12959b23,37769,1731663360607 2024-11-15T09:36:02,292 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:36:02,292 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:36:02,296 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T09:36:02,296 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=791f12959b23,37769,1731663360607 in 336 msec 2024-11-15T09:36:02,300 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T09:36:02,300 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 904 msec 2024-11-15T09:36:02,301 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:36:02,301 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T09:36:02,303 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T09:36:02,303 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=791f12959b23,37769,1731663360607, seqNum=-1] 2024-11-15T09:36:02,303 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T09:36:02,305 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:54119, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T09:36:02,312 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.3070 sec 2024-11-15T09:36:02,313 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731663362313, completionTime=-1 2024-11-15T09:36:02,313 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T09:36:02,313 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T09:36:02,315 INFO [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T09:36:02,315 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731663422315 2024-11-15T09:36:02,315 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731663482315 2024-11-15T09:36:02,315 INFO [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-15T09:36:02,316 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,35331,1731663360416-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:02,316 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,35331,1731663360416-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:02,316 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,35331,1731663360416-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:02,316 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-791f12959b23:35331, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:02,316 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:02,316 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:02,318 DEBUG [master/791f12959b23:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T09:36:02,321 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.633sec 2024-11-15T09:36:02,321 INFO [master/791f12959b23:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T09:36:02,321 INFO [master/791f12959b23:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 
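The NodeDataChanged events a few entries above fire on /hbase/meta-region-server, the znode the master writes the meta location into on the quorum at 127.0.0.1:49586. A hedged sketch of reading that znode with the plain ZooKeeper client; the stored payload is a protobuf-framed ServerName, so only its size is printed, and connection-establishment handling is omitted for brevity.

import org.apache.zookeeper.ZooKeeper;

public class MetaZnodeSketch {
  public static void main(String[] args) throws Exception {
    // Quorum address and znode path are the ones shown in the log above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:49586", 30000, event -> { });
    byte[] data = zk.getData("/hbase/meta-region-server", false, null);
    System.out.println("/hbase/meta-region-server holds " + data.length + " bytes");
    zk.close();
  }
}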
2024-11-15T09:36:02,321 INFO [master/791f12959b23:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T09:36:02,321 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T09:36:02,321 INFO [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T09:36:02,321 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,35331,1731663360416-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T09:36:02,321 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,35331,1731663360416-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T09:36:02,324 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T09:36:02,324 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T09:36:02,324 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,35331,1731663360416-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:02,345 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6512930c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:36:02,345 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 791f12959b23,35331,-1 for getting cluster id 2024-11-15T09:36:02,345 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T09:36:02,347 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9bc65410-eae2-44bd-82b8-112288eb1e71' 2024-11-15T09:36:02,348 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T09:36:02,348 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9bc65410-eae2-44bd-82b8-112288eb1e71" 2024-11-15T09:36:02,349 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5aa79d58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:36:02,349 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [791f12959b23,35331,-1] 2024-11-15T09:36:02,349 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T09:36:02,350 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:36:02,351 
INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57836, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T09:36:02,352 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bd69191, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:36:02,353 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T09:36:02,354 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=791f12959b23,37769,1731663360607, seqNum=-1] 2024-11-15T09:36:02,355 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T09:36:02,357 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37962, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T09:36:02,359 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=791f12959b23,35331,1731663360416 2024-11-15T09:36:02,360 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:02,363 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T09:36:02,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T09:36:02,364 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T09:36:02,364 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:36:02,364 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:36:02,364 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:36:02,364 INFO [Registry-endpoints-refresh-end-points 
{}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T09:36:02,364 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T09:36:02,364 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=604601029, stopped=false 2024-11-15T09:36:02,365 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=791f12959b23,35331,1731663360416 2024-11-15T09:36:02,386 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T09:36:02,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T09:36:02,386 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:02,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:02,386 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T09:36:02,387 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T09:36:02,387 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:36:02,387 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:36:02,387 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:36:02,387 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:36:02,387 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '791f12959b23,37769,1731663360607' ***** 2024-11-15T09:36:02,387 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T09:36:02,387 INFO [RS:0;791f12959b23:37769 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T09:36:02,388 INFO [RS:0;791f12959b23:37769 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T09:36:02,388 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T09:36:02,388 INFO [RS:0;791f12959b23:37769 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T09:36:02,388 INFO [RS:0;791f12959b23:37769 {}] regionserver.HRegionServer(959): stopping server 791f12959b23,37769,1731663360607 2024-11-15T09:36:02,388 INFO [RS:0;791f12959b23:37769 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T09:36:02,388 INFO [RS:0;791f12959b23:37769 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;791f12959b23:37769. 
2024-11-15T09:36:02,388 DEBUG [RS:0;791f12959b23:37769 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:36:02,388 DEBUG [RS:0;791f12959b23:37769 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:36:02,388 INFO [RS:0;791f12959b23:37769 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T09:36:02,388 INFO [RS:0;791f12959b23:37769 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T09:36:02,388 INFO [RS:0;791f12959b23:37769 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-15T09:36:02,388 INFO [RS:0;791f12959b23:37769 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T09:36:02,389 INFO [RS:0;791f12959b23:37769 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-15T09:36:02,389 DEBUG [RS:0;791f12959b23:37769 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-15T09:36:02,389 DEBUG [RS:0;791f12959b23:37769 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-15T09:36:02,389 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T09:36:02,389 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T09:36:02,389 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T09:36:02,389 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T09:36:02,389 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T09:36:02,390 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-15T09:36:02,408 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/data/hbase/meta/1588230740/.tmp/ns/dab99f69abfd49bd82c0ed31853b2e0a is 43, key is default/ns:d/1731663362305/Put/seqid=0 2024-11-15T09:36:02,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36901 is added to blk_1073741835_1011 (size=5153) 2024-11-15T09:36:02,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40389 is added to blk_1073741835_1011 (size=5153) 2024-11-15T09:36:02,415 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/data/hbase/meta/1588230740/.tmp/ns/dab99f69abfd49bd82c0ed31853b2e0a 2024-11-15T09:36:02,422 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/data/hbase/meta/1588230740/.tmp/ns/dab99f69abfd49bd82c0ed31853b2e0a as hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/data/hbase/meta/1588230740/ns/dab99f69abfd49bd82c0ed31853b2e0a 2024-11-15T09:36:02,429 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/data/hbase/meta/1588230740/ns/dab99f69abfd49bd82c0ed31853b2e0a, entries=2, sequenceid=6, filesize=5.0 K 2024-11-15T09:36:02,431 INFO 
[RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 42ms, sequenceid=6, compaction requested=false 2024-11-15T09:36:02,431 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T09:36:02,436 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T09:36:02,437 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T09:36:02,437 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T09:36:02,437 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731663362389Running coprocessor pre-close hooks at 1731663362389Disabling compacts and flushes for region at 1731663362389Disabling writes for close at 1731663362389Obtaining lock to block concurrent updates at 1731663362390 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731663362390Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731663362390Flushing stores of hbase:meta,,1.1588230740 at 1731663362391 (+1 ms)Flushing 1588230740/ns: creating writer at 1731663362391Flushing 1588230740/ns: appending metadata at 1731663362408 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731663362408Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f5ce6c1: reopening flushed file at 1731663362421 (+13 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 42ms, sequenceid=6, compaction requested=false at 1731663362431 (+10 ms)Writing region close event to WAL at 1731663362432 (+1 ms)Running coprocessor post-close hooks at 1731663362437 (+5 ms)Closed at 1731663362437 2024-11-15T09:36:02,438 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T09:36:02,555 INFO [regionserver/791f12959b23:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-15T09:36:02,556 INFO [regionserver/791f12959b23:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-15T09:36:02,589 INFO [RS:0;791f12959b23:37769 {}] regionserver.HRegionServer(976): stopping server 791f12959b23,37769,1731663360607; all regions closed. 
2024-11-15T09:36:02,590 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:02,590 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:02,590 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:02,590 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:02,590 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:02,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40389 is added to blk_1073741834_1010 (size=1152) 2024-11-15T09:36:02,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36901 is added to blk_1073741834_1010 (size=1152) 2024-11-15T09:36:02,595 DEBUG [RS:0;791f12959b23:37769 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/oldWALs 2024-11-15T09:36:02,595 INFO [RS:0;791f12959b23:37769 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 791f12959b23%2C37769%2C1731663360607.meta:.meta(num 1731663362129) 2024-11-15T09:36:02,596 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:02,596 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:02,596 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:02,596 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:02,596 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:02,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36901 is added to blk_1073741833_1009 (size=93) 2024-11-15T09:36:02,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40389 is added to blk_1073741833_1009 (size=93) 2024-11-15T09:36:02,601 DEBUG [RS:0;791f12959b23:37769 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/oldWALs 2024-11-15T09:36:02,601 INFO [RS:0;791f12959b23:37769 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 791f12959b23%2C37769%2C1731663360607:(num 1731663361652) 2024-11-15T09:36:02,601 DEBUG [RS:0;791f12959b23:37769 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:36:02,601 INFO [RS:0;791f12959b23:37769 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T09:36:02,601 INFO [RS:0;791f12959b23:37769 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T09:36:02,601 INFO [RS:0;791f12959b23:37769 {}] hbase.ChoreService(370): Chore service for: regionserver/791f12959b23:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T09:36:02,601 INFO [RS:0;791f12959b23:37769 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T09:36:02,601 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T09:36:02,602 INFO [RS:0;791f12959b23:37769 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37769 2024-11-15T09:36:02,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T09:36:02,614 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/791f12959b23,37769,1731663360607 2024-11-15T09:36:02,614 INFO [RS:0;791f12959b23:37769 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T09:36:02,614 ERROR [pool-180-thread-1-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$363/0x00007fc3f4903f28@5ce4ceda rejected from java.util.concurrent.ThreadPoolExecutor@19e65b22[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-15T09:36:02,625 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [791f12959b23,37769,1731663360607] 2024-11-15T09:36:02,635 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/791f12959b23,37769,1731663360607 already deleted, retry=false 2024-11-15T09:36:02,635 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 791f12959b23,37769,1731663360607 expired; onlineServers=0 2024-11-15T09:36:02,635 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '791f12959b23,35331,1731663360416' ***** 2024-11-15T09:36:02,635 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T09:36:02,635 INFO [M:0;791f12959b23:35331 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T09:36:02,635 INFO [M:0;791f12959b23:35331 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T09:36:02,636 DEBUG [M:0;791f12959b23:35331 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T09:36:02,636 DEBUG [M:0;791f12959b23:35331 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T09:36:02,636 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T09:36:02,636 DEBUG [master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663361011 {}] cleaner.HFileCleaner(306): Exit Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663361011,5,FailOnTimeoutGroup] 2024-11-15T09:36:02,636 DEBUG [master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663361011 {}] cleaner.HFileCleaner(306): Exit Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663361011,5,FailOnTimeoutGroup] 2024-11-15T09:36:02,636 INFO [M:0;791f12959b23:35331 {}] hbase.ChoreService(370): Chore service for: master/791f12959b23:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T09:36:02,636 INFO [M:0;791f12959b23:35331 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T09:36:02,636 DEBUG [M:0;791f12959b23:35331 {}] master.HMaster(1795): Stopping service threads 2024-11-15T09:36:02,636 INFO [M:0;791f12959b23:35331 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T09:36:02,636 INFO [M:0;791f12959b23:35331 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T09:36:02,636 INFO [M:0;791f12959b23:35331 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T09:36:02,637 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T09:36:02,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T09:36:02,646 DEBUG [M:0;791f12959b23:35331 {}] zookeeper.ZKUtil(347): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T09:36:02,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:02,646 WARN [M:0;791f12959b23:35331 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T09:36:02,646 INFO [M:0;791f12959b23:35331 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/.lastflushedseqids 2024-11-15T09:36:02,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36901 is added to blk_1073741836_1012 (size=99) 2024-11-15T09:36:02,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40389 is added to blk_1073741836_1012 (size=99) 2024-11-15T09:36:02,653 INFO [M:0;791f12959b23:35331 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T09:36:02,653 INFO [M:0;791f12959b23:35331 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T09:36:02,653 DEBUG [M:0;791f12959b23:35331 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T09:36:02,653 INFO [M:0;791f12959b23:35331 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:36:02,653 DEBUG [M:0;791f12959b23:35331 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:36:02,653 DEBUG [M:0;791f12959b23:35331 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T09:36:02,653 DEBUG [M:0;791f12959b23:35331 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:36:02,654 INFO [M:0;791f12959b23:35331 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-15T09:36:02,670 DEBUG [M:0;791f12959b23:35331 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e91c9efecb794170b06061765cade0af is 82, key is hbase:meta,,1/info:regioninfo/1731663362161/Put/seqid=0 2024-11-15T09:36:02,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36901 is added to blk_1073741837_1013 (size=5672) 2024-11-15T09:36:02,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40389 is added to blk_1073741837_1013 (size=5672) 2024-11-15T09:36:02,677 INFO [M:0;791f12959b23:35331 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e91c9efecb794170b06061765cade0af 2024-11-15T09:36:02,699 DEBUG [M:0;791f12959b23:35331 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/78966beaabb7420dade3d53049f07160 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731663362311/Put/seqid=0 2024-11-15T09:36:02,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40389 is added to blk_1073741838_1014 (size=5275) 2024-11-15T09:36:02,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36901 is added to blk_1073741838_1014 (size=5275) 2024-11-15T09:36:02,706 INFO [M:0;791f12959b23:35331 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/78966beaabb7420dade3d53049f07160 2024-11-15T09:36:02,725 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:36:02,725 INFO [RS:0;791f12959b23:37769 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T09:36:02,725 INFO [RS:0;791f12959b23:37769 {}] regionserver.HRegionServer(1031): Exiting; stopping=791f12959b23,37769,1731663360607; zookeeper connection closed. 
2024-11-15T09:36:02,725 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37769-0x1013dd9ed380001, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:36:02,725 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2f4a5e9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2f4a5e9 2024-11-15T09:36:02,725 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T09:36:02,734 DEBUG [M:0;791f12959b23:35331 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2fde8582cdff4c458915916565d57ad4 is 69, key is 791f12959b23,37769,1731663360607/rs:state/1731663361380/Put/seqid=0 2024-11-15T09:36:02,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40389 is added to blk_1073741839_1015 (size=5156) 2024-11-15T09:36:02,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36901 is added to blk_1073741839_1015 (size=5156) 2024-11-15T09:36:02,741 INFO [M:0;791f12959b23:35331 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2fde8582cdff4c458915916565d57ad4 2024-11-15T09:36:02,762 DEBUG [M:0;791f12959b23:35331 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/263bf460bc9c441a843b5e90d000931e is 52, key is load_balancer_on/state:d/1731663362362/Put/seqid=0 2024-11-15T09:36:02,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40389 is added to blk_1073741840_1016 (size=5056) 2024-11-15T09:36:02,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36901 is added to blk_1073741840_1016 (size=5056) 2024-11-15T09:36:02,768 INFO [M:0;791f12959b23:35331 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/263bf460bc9c441a843b5e90d000931e 2024-11-15T09:36:02,778 DEBUG [M:0;791f12959b23:35331 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e91c9efecb794170b06061765cade0af as hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e91c9efecb794170b06061765cade0af 2024-11-15T09:36:02,786 INFO [M:0;791f12959b23:35331 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e91c9efecb794170b06061765cade0af, entries=8, sequenceid=29, 
filesize=5.5 K 2024-11-15T09:36:02,787 DEBUG [M:0;791f12959b23:35331 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/78966beaabb7420dade3d53049f07160 as hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/78966beaabb7420dade3d53049f07160 2024-11-15T09:36:02,794 INFO [M:0;791f12959b23:35331 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/78966beaabb7420dade3d53049f07160, entries=3, sequenceid=29, filesize=5.2 K 2024-11-15T09:36:02,796 DEBUG [M:0;791f12959b23:35331 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2fde8582cdff4c458915916565d57ad4 as hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2fde8582cdff4c458915916565d57ad4 2024-11-15T09:36:02,804 INFO [M:0;791f12959b23:35331 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2fde8582cdff4c458915916565d57ad4, entries=1, sequenceid=29, filesize=5.0 K 2024-11-15T09:36:02,805 DEBUG [M:0;791f12959b23:35331 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/263bf460bc9c441a843b5e90d000931e as hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/263bf460bc9c441a843b5e90d000931e 2024-11-15T09:36:02,811 INFO [M:0;791f12959b23:35331 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33511/user/jenkins/test-data/ab7958db-2ffe-f946-82a6-97d924391ed4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/263bf460bc9c441a843b5e90d000931e, entries=1, sequenceid=29, filesize=4.9 K 2024-11-15T09:36:02,813 INFO [M:0;791f12959b23:35331 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 159ms, sequenceid=29, compaction requested=false 2024-11-15T09:36:02,814 INFO [M:0;791f12959b23:35331 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T09:36:02,814 DEBUG [M:0;791f12959b23:35331 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731663362653Disabling compacts and flushes for region at 1731663362653Disabling writes for close at 1731663362653Obtaining lock to block concurrent updates at 1731663362654 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731663362654Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731663362654Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731663362655 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731663362655Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731663362670 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731663362670Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731663362684 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731663362698 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731663362698Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731663362713 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731663362734 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731663362734Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731663362747 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731663362761 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731663362761Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67ed5689: reopening flushed file at 1731663362775 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b5f859e: reopening flushed file at 1731663362786 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73dbc758: reopening flushed file at 1731663362794 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5dd12a0e: reopening flushed file at 1731663362804 (+10 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 159ms, sequenceid=29, compaction requested=false at 1731663362813 (+9 ms)Writing region close event to WAL at 1731663362814 (+1 ms)Closed at 1731663362814 2024-11-15T09:36:02,814 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:02,815 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:02,815 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:02,815 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:02,815 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:02,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36901 is added to blk_1073741830_1006 (size=10311) 2024-11-15T09:36:02,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40389 is added to blk_1073741830_1006 (size=10311) 2024-11-15T09:36:02,818 INFO [M:0;791f12959b23:35331 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-15T09:36:02,818 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T09:36:02,818 INFO [M:0;791f12959b23:35331 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35331 2024-11-15T09:36:02,818 INFO [M:0;791f12959b23:35331 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T09:36:02,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:36:02,928 INFO [M:0;791f12959b23:35331 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T09:36:02,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35331-0x1013dd9ed380000, quorum=127.0.0.1:49586, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:36:02,931 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@da5059a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:36:02,931 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2220be00{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:36:02,931 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:36:02,932 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a742c1f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:36:02,932 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6082dc4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/hadoop.log.dir/,STOPPED} 2024-11-15T09:36:02,933 WARN [BP-1750370677-172.17.0.2-1731663358165 heartbeating to localhost/127.0.0.1:33511 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:36:02,933 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T09:36:02,933 WARN [BP-1750370677-172.17.0.2-1731663358165 heartbeating to localhost/127.0.0.1:33511 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1750370677-172.17.0.2-1731663358165 (Datanode Uuid d2e6ca27-3222-4fd1-aae1-6e20996668bd) service to localhost/127.0.0.1:33511 2024-11-15T09:36:02,933 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:36:02,933 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/cluster_94079e49-e2a7-7639-ada5-2d9cce0ca436/data/data3/current/BP-1750370677-172.17.0.2-1731663358165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:36:02,934 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/cluster_94079e49-e2a7-7639-ada5-2d9cce0ca436/data/data4/current/BP-1750370677-172.17.0.2-1731663358165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:36:02,934 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:36:02,936 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4595827f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:36:02,936 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3b01355c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:36:02,936 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:36:02,936 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b58749b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:36:02,937 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61783b0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/hadoop.log.dir/,STOPPED} 2024-11-15T09:36:02,938 WARN [BP-1750370677-172.17.0.2-1731663358165 heartbeating to localhost/127.0.0.1:33511 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:36:02,938 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T09:36:02,938 WARN [BP-1750370677-172.17.0.2-1731663358165 heartbeating to localhost/127.0.0.1:33511 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1750370677-172.17.0.2-1731663358165 (Datanode Uuid df63318d-1178-4596-b2f1-c4a630020a76) service to localhost/127.0.0.1:33511 2024-11-15T09:36:02,938 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:36:02,938 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/cluster_94079e49-e2a7-7639-ada5-2d9cce0ca436/data/data1/current/BP-1750370677-172.17.0.2-1731663358165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:36:02,938 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/cluster_94079e49-e2a7-7639-ada5-2d9cce0ca436/data/data2/current/BP-1750370677-172.17.0.2-1731663358165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:36:02,939 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:36:02,944 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55cb1221{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T09:36:02,945 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@542ee468{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:36:02,945 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:36:02,945 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@737d6115{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:36:02,945 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54b8bf96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/hadoop.log.dir/,STOPPED} 2024-11-15T09:36:02,950 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T09:36:02,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T09:36:02,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T09:36:02,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/hadoop.log.dir so I do NOT create it in target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c 2024-11-15T09:36:02,967 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b687802d-5cd1-98d0-1d6c-50bcce98150f/hadoop.tmp.dir so I do NOT create it in target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c 2024-11-15T09:36:02,967 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a, deleteOnExit=true 2024-11-15T09:36:02,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T09:36:02,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/test.cache.data in system properties and HBase conf 2024-11-15T09:36:02,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T09:36:02,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/hadoop.log.dir in system properties and HBase conf 2024-11-15T09:36:02,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T09:36:02,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T09:36:02,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T09:36:02,968 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-15T09:36:02,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T09:36:02,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T09:36:02,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T09:36:02,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T09:36:02,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T09:36:02,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T09:36:02,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T09:36:02,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T09:36:02,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T09:36:02,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/nfs.dump.dir in system properties and HBase conf 2024-11-15T09:36:02,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/java.io.tmpdir in system properties and HBase conf 2024-11-15T09:36:02,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T09:36:02,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T09:36:02,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T09:36:02,986 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T09:36:03,269 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:36:03,275 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:36:03,276 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:36:03,276 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:36:03,276 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T09:36:03,277 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:36:03,278 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@475f8022{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:36:03,278 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1aa07d80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:36:03,378 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@94a50db{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/java.io.tmpdir/jetty-localhost-37993-hadoop-hdfs-3_4_1-tests_jar-_-any-10746279873068787180/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T09:36:03,379 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38184680{HTTP/1.1, (http/1.1)}{localhost:37993} 2024-11-15T09:36:03,379 INFO [Time-limited test {}] server.Server(415): Started @109812ms 2024-11-15T09:36:03,392 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T09:36:03,510 INFO [regionserver/791f12959b23:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T09:36:03,642 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:36:03,646 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:36:03,647 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:36:03,647 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:36:03,647 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:36:03,648 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68a89b56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:36:03,648 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2cb9bebc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:36:03,748 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d327fd2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/java.io.tmpdir/jetty-localhost-33299-hadoop-hdfs-3_4_1-tests_jar-_-any-15504144032747972514/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:36:03,748 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4c80aceb{HTTP/1.1, (http/1.1)}{localhost:33299} 2024-11-15T09:36:03,748 INFO [Time-limited test {}] server.Server(415): Started @110181ms 2024-11-15T09:36:03,750 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:36:03,793 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:36:03,797 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:36:03,799 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:36:03,799 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:36:03,799 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:36:03,800 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74fcfaad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:36:03,800 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f7f19bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:36:03,915 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@597807df{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/java.io.tmpdir/jetty-localhost-35435-hadoop-hdfs-3_4_1-tests_jar-_-any-17906415519199095597/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:36:03,916 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@401bd933{HTTP/1.1, (http/1.1)}{localhost:35435} 2024-11-15T09:36:03,916 INFO [Time-limited test {}] server.Server(415): Started @110349ms 2024-11-15T09:36:03,918 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:36:04,792 WARN [Thread-670 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data1/current/BP-1651719749-172.17.0.2-1731663362998/current, will proceed with Du for space computation calculation, 2024-11-15T09:36:04,792 WARN [Thread-671 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data2/current/BP-1651719749-172.17.0.2-1731663362998/current, will proceed with Du for space computation calculation, 2024-11-15T09:36:04,818 WARN [Thread-634 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T09:36:04,821 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ea6ca821918f0f with lease ID 0xcd48484be83b2092: Processing first storage report for DS-f98b315c-1dd2-4482-8677-bcb94341761d from datanode DatanodeRegistration(127.0.0.1:45915, datanodeUuid=abaf1894-c114-4357-953a-ec1e5c1e419f, infoPort=41815, infoSecurePort=0, ipcPort=36911, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998) 2024-11-15T09:36:04,821 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ea6ca821918f0f with lease ID 0xcd48484be83b2092: from storage DS-f98b315c-1dd2-4482-8677-bcb94341761d node DatanodeRegistration(127.0.0.1:45915, datanodeUuid=abaf1894-c114-4357-953a-ec1e5c1e419f, infoPort=41815, infoSecurePort=0, ipcPort=36911, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T09:36:04,821 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ea6ca821918f0f with lease ID 0xcd48484be83b2092: Processing first storage report for DS-b738761f-3ce8-4672-80d9-fe6ec834bb38 from datanode DatanodeRegistration(127.0.0.1:45915, datanodeUuid=abaf1894-c114-4357-953a-ec1e5c1e419f, infoPort=41815, infoSecurePort=0, ipcPort=36911, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998) 2024-11-15T09:36:04,821 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ea6ca821918f0f with lease ID 0xcd48484be83b2092: from storage DS-b738761f-3ce8-4672-80d9-fe6ec834bb38 node DatanodeRegistration(127.0.0.1:45915, datanodeUuid=abaf1894-c114-4357-953a-ec1e5c1e419f, infoPort=41815, infoSecurePort=0, ipcPort=36911, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:36:04,993 WARN [Thread-681 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data3/current/BP-1651719749-172.17.0.2-1731663362998/current, will proceed with Du for space computation calculation, 2024-11-15T09:36:05,008 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data4/current/BP-1651719749-172.17.0.2-1731663362998/current, will proceed with Du for space computation calculation, 2024-11-15T09:36:05,052 WARN [Thread-657 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T09:36:05,059 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b276f2bcb823cdb with lease ID 0xcd48484be83b2093: Processing first storage report for DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e from datanode DatanodeRegistration(127.0.0.1:38481, datanodeUuid=ddc2173d-f513-4ad4-ac24-8be0bc5778af, infoPort=44921, infoSecurePort=0, ipcPort=38871, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998) 2024-11-15T09:36:05,060 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b276f2bcb823cdb with lease ID 0xcd48484be83b2093: from storage DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e node DatanodeRegistration(127.0.0.1:38481, datanodeUuid=ddc2173d-f513-4ad4-ac24-8be0bc5778af, infoPort=44921, infoSecurePort=0, ipcPort=38871, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:36:05,060 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b276f2bcb823cdb with lease ID 0xcd48484be83b2093: Processing first storage report for DS-b185c7f9-f7be-4bd7-ad9a-d8806a9fc06f from datanode DatanodeRegistration(127.0.0.1:38481, datanodeUuid=ddc2173d-f513-4ad4-ac24-8be0bc5778af, infoPort=44921, infoSecurePort=0, ipcPort=38871, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998) 2024-11-15T09:36:05,060 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b276f2bcb823cdb with lease ID 0xcd48484be83b2093: from storage DS-b185c7f9-f7be-4bd7-ad9a-d8806a9fc06f node DatanodeRegistration(127.0.0.1:38481, datanodeUuid=ddc2173d-f513-4ad4-ac24-8be0bc5778af, infoPort=44921, infoSecurePort=0, ipcPort=38871, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:36:05,071 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c 2024-11-15T09:36:05,076 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/zookeeper_0, clientPort=58696, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T09:36:05,077 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58696 2024-11-15T09:36:05,078 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:05,080 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:05,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45915 is added to blk_1073741825_1001 (size=7) 2024-11-15T09:36:05,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38481 is added to blk_1073741825_1001 (size=7) 2024-11-15T09:36:05,098 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54 with version=8 2024-11-15T09:36:05,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/hbase-staging 2024-11-15T09:36:05,100 INFO [Time-limited test {}] client.ConnectionUtils(128): master/791f12959b23:0 server-side Connection retries=45 2024-11-15T09:36:05,101 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:05,101 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:05,101 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T09:36:05,101 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:05,101 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T09:36:05,101 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T09:36:05,101 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T09:36:05,102 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39767 2024-11-15T09:36:05,104 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39767 connecting to ZooKeeper ensemble=127.0.0.1:58696 2024-11-15T09:36:05,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:397670x0, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T09:36:05,150 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39767-0x1013dd9ff7e0000 connected 2024-11-15T09:36:05,239 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:05,241 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:05,244 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:36:05,245 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54, hbase.cluster.distributed=false 2024-11-15T09:36:05,246 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T09:36:05,247 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39767 2024-11-15T09:36:05,247 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39767 2024-11-15T09:36:05,247 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39767 2024-11-15T09:36:05,248 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39767 2024-11-15T09:36:05,248 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39767 2024-11-15T09:36:05,267 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/791f12959b23:0 server-side Connection retries=45 2024-11-15T09:36:05,267 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:05,268 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:05,268 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T09:36:05,268 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:05,268 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T09:36:05,268 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T09:36:05,268 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T09:36:05,269 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46093 2024-11-15T09:36:05,271 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46093 connecting to ZooKeeper ensemble=127.0.0.1:58696 2024-11-15T09:36:05,272 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:05,275 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:05,291 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:460930x0, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T09:36:05,292 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:460930x0, quorum=127.0.0.1:58696, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:36:05,292 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46093-0x1013dd9ff7e0001 connected 2024-11-15T09:36:05,292 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T09:36:05,293 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T09:36:05,293 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T09:36:05,295 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T09:36:05,300 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46093 2024-11-15T09:36:05,304 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46093 2024-11-15T09:36:05,305 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46093 2024-11-15T09:36:05,305 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46093 2024-11-15T09:36:05,306 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46093 2024-11-15T09:36:05,319 DEBUG [M:0;791f12959b23:39767 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;791f12959b23:39767 2024-11-15T09:36:05,320 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/791f12959b23,39767,1731663365100 2024-11-15T09:36:05,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:36:05,333 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:36:05,334 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/791f12959b23,39767,1731663365100 2024-11-15T09:36:05,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:05,344 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T09:36:05,344 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:05,349 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T09:36:05,349 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/791f12959b23,39767,1731663365100 from backup master directory 2024-11-15T09:36:05,361 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:36:05,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/791f12959b23,39767,1731663365100 2024-11-15T09:36:05,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:36:05,361 WARN [master/791f12959b23:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-15T09:36:05,361 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=791f12959b23,39767,1731663365100 2024-11-15T09:36:05,367 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/hbase.id] with ID: e03c51d2-3fb6-4754-bbc4-c2e69d458a6a 2024-11-15T09:36:05,367 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/.tmp/hbase.id 2024-11-15T09:36:05,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38481 is added to blk_1073741826_1002 (size=42) 2024-11-15T09:36:05,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45915 is added to blk_1073741826_1002 (size=42) 2024-11-15T09:36:05,384 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/.tmp/hbase.id]:[hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/hbase.id] 2024-11-15T09:36:05,400 INFO [master/791f12959b23:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:05,400 INFO [master/791f12959b23:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T09:36:05,402 INFO [master/791f12959b23:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-15T09:36:05,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:05,414 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:05,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45915 is added to blk_1073741827_1003 (size=196) 2024-11-15T09:36:05,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38481 is added to blk_1073741827_1003 (size=196) 2024-11-15T09:36:05,429 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T09:36:05,430 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T09:36:05,430 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:36:05,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45915 is added to blk_1073741828_1004 (size=1189) 2024-11-15T09:36:05,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38481 is added to blk_1073741828_1004 (size=1189) 2024-11-15T09:36:05,441 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store 2024-11-15T09:36:05,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45915 is added to blk_1073741829_1005 (size=34) 2024-11-15T09:36:05,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38481 is added to blk_1073741829_1005 (size=34) 2024-11-15T09:36:05,451 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:36:05,451 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T09:36:05,451 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:36:05,451 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:36:05,451 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T09:36:05,451 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:36:05,451 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T09:36:05,451 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731663365451Disabling compacts and flushes for region at 1731663365451Disabling writes for close at 1731663365451Writing region close event to WAL at 1731663365451Closed at 1731663365451 2024-11-15T09:36:05,452 WARN [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/.initializing 2024-11-15T09:36:05,452 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/WALs/791f12959b23,39767,1731663365100 2024-11-15T09:36:05,456 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C39767%2C1731663365100, suffix=, logDir=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/WALs/791f12959b23,39767,1731663365100, archiveDir=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/oldWALs, maxLogs=10 2024-11-15T09:36:05,457 INFO [master/791f12959b23:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C39767%2C1731663365100.1731663365457 2024-11-15T09:36:05,464 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/WALs/791f12959b23,39767,1731663365100/791f12959b23%2C39767%2C1731663365100.1731663365457 2024-11-15T09:36:05,465 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41815:41815),(127.0.0.1/127.0.0.1:44921:44921)] 2024-11-15T09:36:05,467 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T09:36:05,467 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:36:05,467 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:05,467 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:05,471 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:05,473 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T09:36:05,473 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:05,474 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:05,474 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:05,476 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T09:36:05,476 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:05,477 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:36:05,477 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:05,479 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T09:36:05,479 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:05,480 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:36:05,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:05,482 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T09:36:05,483 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:05,483 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:36:05,484 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:05,485 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:05,485 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:05,487 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:05,487 DEBUG [master/791f12959b23:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:05,487 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T09:36:05,489 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:05,491 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T09:36:05,492 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=809802, jitterRate=0.029716625809669495}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T09:36:05,493 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731663365467Initializing all the Stores at 1731663365468 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663365469 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663365470 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663365470Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663365470Cleaning up temporary data from old regions at 1731663365487 (+17 ms)Region opened successfully at 1731663365493 (+6 ms) 2024-11-15T09:36:05,494 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T09:36:05,498 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c0e01b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=791f12959b23/172.17.0.2:0 2024-11-15T09:36:05,499 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T09:36:05,500 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T09:36:05,500 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T09:36:05,500 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T09:36:05,501 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T09:36:05,501 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T09:36:05,501 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T09:36:05,504 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T09:36:05,505 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T09:36:05,512 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T09:36:05,513 INFO [master/791f12959b23:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T09:36:05,514 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T09:36:05,523 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T09:36:05,523 INFO [master/791f12959b23:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T09:36:05,524 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T09:36:05,533 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T09:36:05,534 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T09:36:05,544 
DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T09:36:05,546 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T09:36:05,554 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T09:36:05,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T09:36:05,565 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T09:36:05,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:05,565 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:05,566 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=791f12959b23,39767,1731663365100, sessionid=0x1013dd9ff7e0000, setting cluster-up flag (Was=false) 2024-11-15T09:36:05,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:05,586 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:05,618 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T09:36:05,619 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=791f12959b23,39767,1731663365100 2024-11-15T09:36:05,638 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:05,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:05,670 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T09:36:05,672 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=791f12959b23,39767,1731663365100 2024-11-15T09:36:05,674 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T09:36:05,676 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T09:36:05,676 INFO [master/791f12959b23:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T09:36:05,677 INFO [master/791f12959b23:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T09:36:05,677 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 791f12959b23,39767,1731663365100 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T09:36:05,678 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:36:05,679 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:36:05,679 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:36:05,679 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:36:05,679 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/791f12959b23:0, corePoolSize=10, maxPoolSize=10 2024-11-15T09:36:05,679 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:05,679 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/791f12959b23:0, corePoolSize=2, maxPoolSize=2 2024-11-15T09:36:05,679 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/791f12959b23:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T09:36:05,680 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731663395680 2024-11-15T09:36:05,680 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T09:36:05,680 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T09:36:05,680 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T09:36:05,680 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T09:36:05,680 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T09:36:05,680 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T09:36:05,681 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:36:05,681 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T09:36:05,683 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:05,683 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
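Annotation: the cleaner startup entries above (DirScanPool, the TimeToLive*Cleaner plugins, and the "Chore ScheduledChore name=LogsCleaner ... is enabled" record) all run through HBase's ChoreService/ScheduledChore machinery. The following is a minimal, hedged sketch of that pattern only; the chore name "DemoChore", the 1-second period, and the 3-second run window are invented for illustration and do not come from this test.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // A trivial Stoppable; real servers pass the HMaster/HRegionServer itself.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo");           // thread-pool prefix
    service.scheduleChore(new ScheduledChore("DemoChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick");                      // periodic work goes here
      }
    });
    Thread.sleep(3000);                                        // let it fire a few times
    service.shutdown();
  }
}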
2024-11-15T09:36:05,683 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T09:36:05,684 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T09:36:05,684 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T09:36:05,684 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T09:36:05,684 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T09:36:05,684 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T09:36:05,688 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663365684,5,FailOnTimeoutGroup] 2024-11-15T09:36:05,688 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663365688,5,FailOnTimeoutGroup] 2024-11-15T09:36:05,689 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:05,689 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
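Annotation: the "Creating new hbase:meta table descriptor" record above spells out each column family's attributes (VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, BLOCKSIZE, IN_MEMORY, KEEP_DELETED_CELLS). FSTableDescriptors builds that descriptor internally; purely as an illustration of the same attributes expressed through the public client builder API, an 'info'-style family could be declared as below. The table name "demo" is hypothetical; the attribute values simply mirror the logged descriptor.

import org.apache.hadoop.hbase.KeepDeletedCells;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaInfoFamilySketch {
  public static void main(String[] args) {
    // Mirrors the logged attributes: VERSIONS=3, BLOOMFILTER=ROWCOL,
    // DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192, IN_MEMORY=true.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .setInMemory(true)
        .setKeepDeletedCells(KeepDeletedCells.FALSE)
        .build();

    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))   // hypothetical table name
        .setColumnFamily(info)
        .build();

    System.out.println(table);
  }
}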
2024-11-15T09:36:05,689 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:05,689 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:05,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38481 is added to blk_1073741831_1007 (size=1321) 2024-11-15T09:36:05,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45915 is added to blk_1073741831_1007 (size=1321) 2024-11-15T09:36:05,695 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T09:36:05,695 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54 2024-11-15T09:36:05,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45915 is added to blk_1073741832_1008 (size=32) 2024-11-15T09:36:05,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38481 is added to blk_1073741832_1008 (size=32) 2024-11-15T09:36:05,704 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:36:05,706 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T09:36:05,708 INFO [RS:0;791f12959b23:46093 {}] regionserver.HRegionServer(746): ClusterId : e03c51d2-3fb6-4754-bbc4-c2e69d458a6a 2024-11-15T09:36:05,708 DEBUG [RS:0;791f12959b23:46093 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T09:36:05,708 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T09:36:05,708 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:05,709 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:05,709 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T09:36:05,711 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T09:36:05,711 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:05,712 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:05,712 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 
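Annotation: every store opened above prints the same CompactionConfiguration summary: minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms with jitter 0.5. Values like these normally derive from the standard compaction tuning keys; the sketch below only echoes the logged numbers against those keys. The property-to-value mapping is an assumption for illustration, not a dump of this test's hbase-site.xml, and the key names should be verified against the HBase version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Echoes the values printed by CompactionConfiguration above.
    conf.setInt("hbase.hstore.compaction.min", 3);                    // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                   // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);             // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);     // off-peak ratio
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // 128 MB
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);        // 7-day major period
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);      // major jitter
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}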
2024-11-15T09:36:05,714 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T09:36:05,714 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:05,715 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:05,715 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T09:36:05,717 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T09:36:05,717 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:05,717 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:05,718 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T09:36:05,719 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740 2024-11-15T09:36:05,719 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740 2024-11-15T09:36:05,720 DEBUG [RS:0;791f12959b23:46093 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T09:36:05,720 DEBUG [RS:0;791f12959b23:46093 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T09:36:05,721 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T09:36:05,721 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T09:36:05,721 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T09:36:05,723 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T09:36:05,726 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T09:36:05,726 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=693632, jitterRate=-0.11800196766853333}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T09:36:05,727 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731663365704Initializing all the Stores at 1731663365706 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663365706Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663365706Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663365706Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663365706Cleaning up temporary data from old regions at 1731663365721 (+15 ms)Region opened successfully at 1731663365727 (+6 ms) 2024-11-15T09:36:05,728 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T09:36:05,728 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T09:36:05,728 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T09:36:05,728 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired 
close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T09:36:05,728 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T09:36:05,728 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T09:36:05,728 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731663365728Disabling compacts and flushes for region at 1731663365728Disabling writes for close at 1731663365728Writing region close event to WAL at 1731663365728Closed at 1731663365728 2024-11-15T09:36:05,730 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:36:05,730 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T09:36:05,730 DEBUG [RS:0;791f12959b23:46093 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T09:36:05,731 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T09:36:05,731 DEBUG [RS:0;791f12959b23:46093 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62b6da10, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=791f12959b23/172.17.0.2:0 2024-11-15T09:36:05,733 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T09:36:05,735 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T09:36:05,746 DEBUG [RS:0;791f12959b23:46093 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;791f12959b23:46093 2024-11-15T09:36:05,746 INFO [RS:0;791f12959b23:46093 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T09:36:05,746 INFO [RS:0;791f12959b23:46093 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T09:36:05,746 DEBUG [RS:0;791f12959b23:46093 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-15T09:36:05,747 INFO [RS:0;791f12959b23:46093 {}] regionserver.HRegionServer(2659): reportForDuty to master=791f12959b23,39767,1731663365100 with port=46093, startcode=1731663365267 2024-11-15T09:36:05,747 DEBUG [RS:0;791f12959b23:46093 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T09:36:05,749 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40925, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T09:36:05,750 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39767 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 791f12959b23,46093,1731663365267 2024-11-15T09:36:05,750 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39767 {}] master.ServerManager(517): Registering regionserver=791f12959b23,46093,1731663365267 2024-11-15T09:36:05,752 DEBUG [RS:0;791f12959b23:46093 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54 2024-11-15T09:36:05,752 DEBUG [RS:0;791f12959b23:46093 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42831 2024-11-15T09:36:05,752 DEBUG [RS:0;791f12959b23:46093 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T09:36:05,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T09:36:05,766 DEBUG [RS:0;791f12959b23:46093 {}] zookeeper.ZKUtil(111): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/791f12959b23,46093,1731663365267 2024-11-15T09:36:05,766 WARN [RS:0;791f12959b23:46093 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T09:36:05,766 INFO [RS:0;791f12959b23:46093 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:36:05,766 DEBUG [RS:0;791f12959b23:46093 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267 2024-11-15T09:36:05,766 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [791f12959b23,46093,1731663365267] 2024-11-15T09:36:05,770 INFO [RS:0;791f12959b23:46093 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T09:36:05,773 INFO [RS:0;791f12959b23:46093 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T09:36:05,773 INFO [RS:0;791f12959b23:46093 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T09:36:05,773 INFO [RS:0;791f12959b23:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
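Annotation: the MemStoreFlusher record above reports globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M. Those two numbers are consistent with the usual defaults of roughly 40% of heap for the global limit (hbase.regionserver.global.memstore.size) and 95% of that limit for the low-water mark, which would put this JVM's heap near 2.2 GB. That back-of-the-envelope reading is an assumption, not something the log states; the tiny sketch below just reproduces the arithmetic.

public class MemStoreLimitArithmetic {
  public static void main(String[] args) {
    double heapMb = 2200.0;        // assumed heap size, back-solved from the logged 880 M limit
    double globalFraction = 0.40;  // assumed default hbase.regionserver.global.memstore.size
    double lowerFraction = 0.95;   // assumed default ...global.memstore.size.lower.limit
    double limitMb = heapMb * globalFraction;     // ~880 MB, matching the log
    double lowMarkMb = limitMb * lowerFraction;   // ~836 MB, matching the log
    System.out.printf("limit=%.0f MB, lowMark=%.0f MB%n", limitMb, lowMarkMb);
  }
}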
2024-11-15T09:36:05,773 INFO [RS:0;791f12959b23:46093 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T09:36:05,775 INFO [RS:0;791f12959b23:46093 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T09:36:05,775 INFO [RS:0;791f12959b23:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:05,775 DEBUG [RS:0;791f12959b23:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:05,775 DEBUG [RS:0;791f12959b23:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:05,775 DEBUG [RS:0;791f12959b23:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:05,775 DEBUG [RS:0;791f12959b23:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:05,776 DEBUG [RS:0;791f12959b23:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:05,776 DEBUG [RS:0;791f12959b23:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/791f12959b23:0, corePoolSize=2, maxPoolSize=2 2024-11-15T09:36:05,776 DEBUG [RS:0;791f12959b23:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:05,776 DEBUG [RS:0;791f12959b23:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:05,776 DEBUG [RS:0;791f12959b23:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:05,776 DEBUG [RS:0;791f12959b23:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:05,776 DEBUG [RS:0;791f12959b23:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:05,776 DEBUG [RS:0;791f12959b23:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:05,776 DEBUG [RS:0;791f12959b23:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/791f12959b23:0, corePoolSize=3, maxPoolSize=3 2024-11-15T09:36:05,776 DEBUG [RS:0;791f12959b23:46093 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0, corePoolSize=3, maxPoolSize=3 2024-11-15T09:36:05,784 INFO [RS:0;791f12959b23:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-15T09:36:05,784 INFO [RS:0;791f12959b23:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:05,784 INFO [RS:0;791f12959b23:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:05,784 INFO [RS:0;791f12959b23:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:05,785 INFO [RS:0;791f12959b23:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:05,785 INFO [RS:0;791f12959b23:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,46093,1731663365267-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T09:36:05,810 INFO [RS:0;791f12959b23:46093 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T09:36:05,810 INFO [RS:0;791f12959b23:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,46093,1731663365267-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:05,810 INFO [RS:0;791f12959b23:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:05,810 INFO [RS:0;791f12959b23:46093 {}] regionserver.Replication(171): 791f12959b23,46093,1731663365267 started 2024-11-15T09:36:05,836 INFO [RS:0;791f12959b23:46093 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:05,836 INFO [RS:0;791f12959b23:46093 {}] regionserver.HRegionServer(1482): Serving as 791f12959b23,46093,1731663365267, RpcServer on 791f12959b23/172.17.0.2:46093, sessionid=0x1013dd9ff7e0001 2024-11-15T09:36:05,836 DEBUG [RS:0;791f12959b23:46093 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T09:36:05,836 DEBUG [RS:0;791f12959b23:46093 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 791f12959b23,46093,1731663365267 2024-11-15T09:36:05,836 DEBUG [RS:0;791f12959b23:46093 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '791f12959b23,46093,1731663365267' 2024-11-15T09:36:05,836 DEBUG [RS:0;791f12959b23:46093 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T09:36:05,837 DEBUG [RS:0;791f12959b23:46093 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T09:36:05,838 DEBUG [RS:0;791f12959b23:46093 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T09:36:05,838 DEBUG [RS:0;791f12959b23:46093 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T09:36:05,838 DEBUG [RS:0;791f12959b23:46093 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 791f12959b23,46093,1731663365267 2024-11-15T09:36:05,838 DEBUG [RS:0;791f12959b23:46093 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '791f12959b23,46093,1731663365267' 2024-11-15T09:36:05,838 DEBUG [RS:0;791f12959b23:46093 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T09:36:05,839 DEBUG 
[RS:0;791f12959b23:46093 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T09:36:05,839 DEBUG [RS:0;791f12959b23:46093 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T09:36:05,839 INFO [RS:0;791f12959b23:46093 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T09:36:05,839 INFO [RS:0;791f12959b23:46093 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T09:36:05,885 WARN [791f12959b23:39767 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-15T09:36:05,942 INFO [RS:0;791f12959b23:46093 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C46093%2C1731663365267, suffix=, logDir=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267, archiveDir=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/oldWALs, maxLogs=32 2024-11-15T09:36:05,943 INFO [RS:0;791f12959b23:46093 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C46093%2C1731663365267.1731663365943 2024-11-15T09:36:05,958 INFO [RS:0;791f12959b23:46093 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663365943 2024-11-15T09:36:05,989 DEBUG [RS:0;791f12959b23:46093 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44921:44921),(127.0.0.1/127.0.0.1:41815:41815)] 2024-11-15T09:36:06,135 DEBUG [791f12959b23:39767 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T09:36:06,136 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=791f12959b23,46093,1731663365267 2024-11-15T09:36:06,138 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 791f12959b23,46093,1731663365267, state=OPENING 2024-11-15T09:36:06,186 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T09:36:06,196 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:06,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:06,197 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T09:36:06,197 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:36:06,197 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=791f12959b23,46093,1731663365267}] 2024-11-15T09:36:06,198 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:36:06,351 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T09:36:06,354 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34171, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T09:36:06,359 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T09:36:06,359 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:36:06,362 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C46093%2C1731663365267.meta, suffix=.meta, logDir=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267, archiveDir=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/oldWALs, maxLogs=32 2024-11-15T09:36:06,363 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta 2024-11-15T09:36:06,377 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta 2024-11-15T09:36:06,388 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44921:44921),(127.0.0.1/127.0.0.1:41815:41815)] 2024-11-15T09:36:06,392 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T09:36:06,393 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T09:36:06,393 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T09:36:06,394 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
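Annotation: the meta open above loads org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with priority 536870911, which corresponds to Coprocessor.PRIORITY_SYSTEM (Integer.MAX_VALUE / 4); the '|class|priority|' string seen earlier in the meta table descriptor is the serialized coprocessor attribute. For an ordinary user table, the same kind of attachment can be expressed through the descriptor builder. The sketch below is illustrative only, with a hypothetical table name "demo", and is not the code path used for hbase:meta.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorAttachSketch {
  public static void main(String[] args) throws Exception {
    // setCoprocessor records the class name as a table attribute, comparable
    // to the coprocessor$1 entry in the logged hbase:meta descriptor.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))                        // hypothetical table
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
    System.out.println(td);
  }
}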
2024-11-15T09:36:06,394 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T09:36:06,394 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:36:06,394 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T09:36:06,394 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T09:36:06,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T09:36:06,398 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T09:36:06,398 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:06,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:06,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T09:36:06,400 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T09:36:06,400 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:06,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:06,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T09:36:06,403 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T09:36:06,403 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:06,403 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:06,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T09:36:06,405 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T09:36:06,405 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:06,406 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-15T09:36:06,406 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T09:36:06,408 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740 2024-11-15T09:36:06,409 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740 2024-11-15T09:36:06,411 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T09:36:06,411 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T09:36:06,412 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T09:36:06,414 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T09:36:06,415 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=714669, jitterRate=-0.09125255048274994}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T09:36:06,415 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T09:36:06,416 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731663366394Writing region info on filesystem at 1731663366394Initializing all the Stores at 1731663366395 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663366395Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663366396 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663366396Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663366396Cleaning up temporary data from old regions at 1731663366411 (+15 ms)Running coprocessor post-open hooks at 1731663366415 (+4 ms)Region opened successfully at 1731663366416 (+1 ms) 2024-11-15T09:36:06,418 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731663366351 2024-11-15T09:36:06,421 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T09:36:06,421 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T09:36:06,422 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=791f12959b23,46093,1731663365267 2024-11-15T09:36:06,424 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 791f12959b23,46093,1731663365267, state=OPEN 2024-11-15T09:36:06,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T09:36:06,463 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T09:36:06,463 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=791f12959b23,46093,1731663365267 2024-11-15T09:36:06,463 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:36:06,463 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:36:06,467 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T09:36:06,468 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=791f12959b23,46093,1731663365267 in 266 msec 2024-11-15T09:36:06,472 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T09:36:06,473 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 737 msec 2024-11-15T09:36:06,474 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:36:06,474 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T09:36:06,476 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T09:36:06,476 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=791f12959b23,46093,1731663365267, seqNum=-1] 2024-11-15T09:36:06,476 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T09:36:06,478 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49083, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T09:36:06,486 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 809 msec 2024-11-15T09:36:06,487 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731663366486, completionTime=-1 2024-11-15T09:36:06,487 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T09:36:06,487 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T09:36:06,489 INFO [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T09:36:06,489 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731663426489 2024-11-15T09:36:06,489 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731663486489 2024-11-15T09:36:06,490 INFO [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-15T09:36:06,490 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,39767,1731663365100-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:06,490 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,39767,1731663365100-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:06,490 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,39767,1731663365100-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:06,490 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-791f12959b23:39767, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T09:36:06,490 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:06,493 DEBUG [master/791f12959b23:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T09:36:06,500 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:06,505 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.143sec 2024-11-15T09:36:06,506 INFO [master/791f12959b23:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T09:36:06,506 INFO [master/791f12959b23:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T09:36:06,506 INFO [master/791f12959b23:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T09:36:06,506 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T09:36:06,506 INFO [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T09:36:06,506 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,39767,1731663365100-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T09:36:06,506 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,39767,1731663365100-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T09:36:06,508 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1744a862, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:36:06,508 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 791f12959b23,39767,-1 for getting cluster id 2024-11-15T09:36:06,508 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T09:36:06,509 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T09:36:06,509 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T09:36:06,510 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,39767,1731663365100-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T09:36:06,512 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e03c51d2-3fb6-4754-bbc4-c2e69d458a6a' 2024-11-15T09:36:06,513 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T09:36:06,513 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e03c51d2-3fb6-4754-bbc4-c2e69d458a6a" 2024-11-15T09:36:06,513 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@265325fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:36:06,513 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [791f12959b23,39767,-1] 2024-11-15T09:36:06,514 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T09:36:06,515 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:36:06,517 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47508, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T09:36:06,518 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7982f2bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:36:06,518 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T09:36:06,520 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=791f12959b23,46093,1731663365267, seqNum=-1] 2024-11-15T09:36:06,521 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T09:36:06,523 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34362, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T09:36:06,525 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=791f12959b23,39767,1731663365100 2024-11-15T09:36:06,525 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:06,529 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T09:36:06,547 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/791f12959b23:0 server-side Connection retries=45 2024-11-15T09:36:06,547 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:06,548 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:06,548 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T09:36:06,548 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:06,548 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T09:36:06,548 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T09:36:06,548 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T09:36:06,549 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41295 2024-11-15T09:36:06,551 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41295 connecting to ZooKeeper ensemble=127.0.0.1:58696 2024-11-15T09:36:06,552 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:06,554 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:06,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:412950x0, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T09:36:06,576 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-15T09:36:06,576 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:412950x0, quorum=127.0.0.1:58696, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-15T09:36:06,576 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41295-0x1013dd9ff7e0002 connected 2024-11-15T09:36:06,577 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T09:36:06,580 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T09:36:06,581 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:41295-0x1013dd9ff7e0002, quorum=127.0.0.1:58696, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T09:36:06,583 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41295-0x1013dd9ff7e0002, quorum=127.0.0.1:58696, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T09:36:06,584 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41295 2024-11-15T09:36:06,584 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41295 2024-11-15T09:36:06,585 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41295 2024-11-15T09:36:06,585 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41295 2024-11-15T09:36:06,588 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41295 2024-11-15T09:36:06,590 INFO [RS:1;791f12959b23:41295 {}] regionserver.HRegionServer(746): ClusterId : e03c51d2-3fb6-4754-bbc4-c2e69d458a6a 2024-11-15T09:36:06,590 DEBUG [RS:1;791f12959b23:41295 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T09:36:06,597 DEBUG [RS:1;791f12959b23:41295 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T09:36:06,597 DEBUG [RS:1;791f12959b23:41295 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T09:36:06,608 DEBUG [RS:1;791f12959b23:41295 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T09:36:06,609 DEBUG [RS:1;791f12959b23:41295 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66c7d5d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=791f12959b23/172.17.0.2:0 2024-11-15T09:36:06,622 DEBUG [RS:1;791f12959b23:41295 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;791f12959b23:41295 2024-11-15T09:36:06,622 INFO [RS:1;791f12959b23:41295 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T09:36:06,622 INFO [RS:1;791f12959b23:41295 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T09:36:06,622 DEBUG [RS:1;791f12959b23:41295 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-15T09:36:06,623 INFO [RS:1;791f12959b23:41295 {}] regionserver.HRegionServer(2659): reportForDuty to master=791f12959b23,39767,1731663365100 with port=41295, startcode=1731663366547 2024-11-15T09:36:06,623 DEBUG [RS:1;791f12959b23:41295 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T09:36:06,625 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50241, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T09:36:06,626 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39767 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 791f12959b23,41295,1731663366547 2024-11-15T09:36:06,626 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39767 {}] master.ServerManager(517): Registering regionserver=791f12959b23,41295,1731663366547 2024-11-15T09:36:06,627 DEBUG [RS:1;791f12959b23:41295 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54 2024-11-15T09:36:06,628 DEBUG [RS:1;791f12959b23:41295 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42831 2024-11-15T09:36:06,628 DEBUG [RS:1;791f12959b23:41295 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T09:36:06,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T09:36:06,736 DEBUG [RS:1;791f12959b23:41295 {}] zookeeper.ZKUtil(111): regionserver:41295-0x1013dd9ff7e0002, quorum=127.0.0.1:58696, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/791f12959b23,41295,1731663366547 2024-11-15T09:36:06,736 WARN [RS:1;791f12959b23:41295 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T09:36:06,736 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [791f12959b23,41295,1731663366547] 2024-11-15T09:36:06,737 INFO [RS:1;791f12959b23:41295 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:36:06,737 DEBUG [RS:1;791f12959b23:41295 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547 2024-11-15T09:36:06,742 INFO [RS:1;791f12959b23:41295 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T09:36:06,744 INFO [RS:1;791f12959b23:41295 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T09:36:06,748 INFO [RS:1;791f12959b23:41295 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T09:36:06,748 INFO [RS:1;791f12959b23:41295 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-15T09:36:06,749 INFO [RS:1;791f12959b23:41295 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T09:36:06,750 INFO [RS:1;791f12959b23:41295 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T09:36:06,750 INFO [RS:1;791f12959b23:41295 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:06,750 DEBUG [RS:1;791f12959b23:41295 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:06,750 DEBUG [RS:1;791f12959b23:41295 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:06,750 DEBUG [RS:1;791f12959b23:41295 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:06,750 DEBUG [RS:1;791f12959b23:41295 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:06,750 DEBUG [RS:1;791f12959b23:41295 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:06,750 DEBUG [RS:1;791f12959b23:41295 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/791f12959b23:0, corePoolSize=2, maxPoolSize=2 2024-11-15T09:36:06,751 DEBUG [RS:1;791f12959b23:41295 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:06,751 DEBUG [RS:1;791f12959b23:41295 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:06,751 DEBUG [RS:1;791f12959b23:41295 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:06,751 DEBUG [RS:1;791f12959b23:41295 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:06,751 DEBUG [RS:1;791f12959b23:41295 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:06,751 DEBUG [RS:1;791f12959b23:41295 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:06,751 DEBUG [RS:1;791f12959b23:41295 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/791f12959b23:0, corePoolSize=3, maxPoolSize=3 2024-11-15T09:36:06,751 DEBUG [RS:1;791f12959b23:41295 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0, corePoolSize=3, maxPoolSize=3 2024-11-15T09:36:06,757 INFO [RS:1;791f12959b23:41295 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-15T09:36:06,757 INFO [RS:1;791f12959b23:41295 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:06,757 INFO [RS:1;791f12959b23:41295 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:06,757 INFO [RS:1;791f12959b23:41295 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:06,757 INFO [RS:1;791f12959b23:41295 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:06,757 INFO [RS:1;791f12959b23:41295 {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,41295,1731663366547-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T09:36:06,775 INFO [RS:1;791f12959b23:41295 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T09:36:06,775 INFO [RS:1;791f12959b23:41295 {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,41295,1731663366547-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:06,775 INFO [RS:1;791f12959b23:41295 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:06,776 INFO [RS:1;791f12959b23:41295 {}] regionserver.Replication(171): 791f12959b23,41295,1731663366547 started 2024-11-15T09:36:06,793 INFO [RS:1;791f12959b23:41295 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:06,794 INFO [RS:1;791f12959b23:41295 {}] regionserver.HRegionServer(1482): Serving as 791f12959b23,41295,1731663366547, RpcServer on 791f12959b23/172.17.0.2:41295, sessionid=0x1013dd9ff7e0002 2024-11-15T09:36:06,794 DEBUG [RS:1;791f12959b23:41295 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T09:36:06,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;791f12959b23:41295,5,FailOnTimeoutGroup] 2024-11-15T09:36:06,794 DEBUG [RS:1;791f12959b23:41295 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 791f12959b23,41295,1731663366547 2024-11-15T09:36:06,794 DEBUG [RS:1;791f12959b23:41295 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '791f12959b23,41295,1731663366547' 2024-11-15T09:36:06,794 DEBUG [RS:1;791f12959b23:41295 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T09:36:06,794 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-15T09:36:06,795 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T09:36:06,795 DEBUG [RS:1;791f12959b23:41295 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T09:36:06,795 DEBUG [RS:1;791f12959b23:41295 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T09:36:06,795 DEBUG [RS:1;791f12959b23:41295 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T09:36:06,795 DEBUG [RS:1;791f12959b23:41295 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
791f12959b23,41295,1731663366547 2024-11-15T09:36:06,795 DEBUG [RS:1;791f12959b23:41295 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '791f12959b23,41295,1731663366547' 2024-11-15T09:36:06,795 DEBUG [RS:1;791f12959b23:41295 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T09:36:06,796 DEBUG [RS:1;791f12959b23:41295 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T09:36:06,796 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 791f12959b23,39767,1731663365100 2024-11-15T09:36:06,796 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@445e0c01 2024-11-15T09:36:06,796 DEBUG [RS:1;791f12959b23:41295 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T09:36:06,796 INFO [RS:1;791f12959b23:41295 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T09:36:06,796 INFO [RS:1;791f12959b23:41295 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T09:36:06,796 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T09:36:06,798 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47512, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T09:36:06,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39767 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T09:36:06,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39767 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-15T09:36:06,800 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39767 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T09:36:06,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39767 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-15T09:36:06,803 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T09:36:06,803 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:06,804 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39767 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-15T09:36:06,805 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T09:36:06,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39767 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T09:36:06,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38481 is added to blk_1073741835_1011 (size=393) 2024-11-15T09:36:06,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45915 is added to blk_1073741835_1011 (size=393) 2024-11-15T09:36:06,831 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8712c3f1e53da8bd32464272fe8554cc, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54 2024-11-15T09:36:06,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45915 is added to blk_1073741836_1012 (size=76) 2024-11-15T09:36:06,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38481 is added to blk_1073741836_1012 (size=76) 2024-11-15T09:36:06,843 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:36:06,844 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 8712c3f1e53da8bd32464272fe8554cc, disabling compactions & flushes 2024-11-15T09:36:06,844 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc. 2024-11-15T09:36:06,844 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc. 2024-11-15T09:36:06,844 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc. after waiting 0 ms 2024-11-15T09:36:06,844 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc. 2024-11-15T09:36:06,844 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc. 2024-11-15T09:36:06,844 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8712c3f1e53da8bd32464272fe8554cc: Waiting for close lock at 1731663366844Disabling compacts and flushes for region at 1731663366844Disabling writes for close at 1731663366844Writing region close event to WAL at 1731663366844Closed at 1731663366844 2024-11-15T09:36:06,846 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T09:36:06,847 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731663366846"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731663366846"}]},"ts":"1731663366846"} 2024-11-15T09:36:06,850 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-15T09:36:06,852 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T09:36:06,852 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731663366852"}]},"ts":"1731663366852"} 2024-11-15T09:36:06,855 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-15T09:36:06,856 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8712c3f1e53da8bd32464272fe8554cc, ASSIGN}] 2024-11-15T09:36:06,858 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8712c3f1e53da8bd32464272fe8554cc, ASSIGN 2024-11-15T09:36:06,860 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8712c3f1e53da8bd32464272fe8554cc, ASSIGN; state=OFFLINE, location=791f12959b23,46093,1731663365267; forceNewPlan=false, retain=false 2024-11-15T09:36:06,899 INFO [RS:1;791f12959b23:41295 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C41295%2C1731663366547, suffix=, logDir=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547, archiveDir=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/oldWALs, maxLogs=32 2024-11-15T09:36:06,900 INFO [RS:1;791f12959b23:41295 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C41295%2C1731663366547.1731663366900 2024-11-15T09:36:06,917 INFO [RS:1;791f12959b23:41295 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 2024-11-15T09:36:06,918 DEBUG [RS:1;791f12959b23:41295 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44921:44921),(127.0.0.1/127.0.0.1:41815:41815)] 2024-11-15T09:36:07,010 INFO [791f12959b23:39767 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-15T09:36:07,011 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8712c3f1e53da8bd32464272fe8554cc, regionState=OPENING, regionLocation=791f12959b23,46093,1731663365267 2024-11-15T09:36:07,014 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8712c3f1e53da8bd32464272fe8554cc, ASSIGN because future has completed 2024-11-15T09:36:07,015 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8712c3f1e53da8bd32464272fe8554cc, server=791f12959b23,46093,1731663365267}] 2024-11-15T09:36:07,173 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc. 2024-11-15T09:36:07,173 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8712c3f1e53da8bd32464272fe8554cc, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc.', STARTKEY => '', ENDKEY => ''} 2024-11-15T09:36:07,174 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 8712c3f1e53da8bd32464272fe8554cc 2024-11-15T09:36:07,174 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:36:07,174 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8712c3f1e53da8bd32464272fe8554cc 2024-11-15T09:36:07,174 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8712c3f1e53da8bd32464272fe8554cc 2024-11-15T09:36:07,180 INFO [StoreOpener-8712c3f1e53da8bd32464272fe8554cc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8712c3f1e53da8bd32464272fe8554cc 2024-11-15T09:36:07,182 INFO [StoreOpener-8712c3f1e53da8bd32464272fe8554cc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8712c3f1e53da8bd32464272fe8554cc columnFamilyName info 2024-11-15T09:36:07,182 DEBUG [StoreOpener-8712c3f1e53da8bd32464272fe8554cc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:07,183 INFO [StoreOpener-8712c3f1e53da8bd32464272fe8554cc-1 {}] regionserver.HStore(327): Store=8712c3f1e53da8bd32464272fe8554cc/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:36:07,183 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8712c3f1e53da8bd32464272fe8554cc 2024-11-15T09:36:07,184 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc 2024-11-15T09:36:07,185 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc 2024-11-15T09:36:07,185 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8712c3f1e53da8bd32464272fe8554cc 2024-11-15T09:36:07,185 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8712c3f1e53da8bd32464272fe8554cc 2024-11-15T09:36:07,187 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8712c3f1e53da8bd32464272fe8554cc 2024-11-15T09:36:07,190 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T09:36:07,190 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8712c3f1e53da8bd32464272fe8554cc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=858101, jitterRate=0.09113305807113647}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T09:36:07,190 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8712c3f1e53da8bd32464272fe8554cc 2024-11-15T09:36:07,191 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8712c3f1e53da8bd32464272fe8554cc: Running coprocessor pre-open hook at 1731663367174Writing region info on filesystem at 1731663367174Initializing all the Stores at 1731663367176 (+2 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663367176Cleaning up temporary data from old regions at 1731663367185 (+9 ms)Running coprocessor post-open hooks at 1731663367190 (+5 ms)Region opened successfully at 1731663367191 (+1 ms) 2024-11-15T09:36:07,192 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc., pid=6, masterSystemTime=1731663367168 2024-11-15T09:36:07,195 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc. 2024-11-15T09:36:07,195 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc. 2024-11-15T09:36:07,196 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8712c3f1e53da8bd32464272fe8554cc, regionState=OPEN, openSeqNum=2, regionLocation=791f12959b23,46093,1731663365267 2024-11-15T09:36:07,199 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8712c3f1e53da8bd32464272fe8554cc, server=791f12959b23,46093,1731663365267 because future has completed 2024-11-15T09:36:07,205 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T09:36:07,205 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8712c3f1e53da8bd32464272fe8554cc, server=791f12959b23,46093,1731663365267 in 186 msec 2024-11-15T09:36:07,209 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T09:36:07,209 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8712c3f1e53da8bd32464272fe8554cc, ASSIGN in 349 msec 2024-11-15T09:36:07,210 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T09:36:07,211 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731663367210"}]},"ts":"1731663367210"} 2024-11-15T09:36:07,214 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-15T09:36:07,215 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T09:36:07,218 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 415 msec 2024-11-15T09:36:07,640 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T09:36:07,642 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:07,664 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:07,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:07,668 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:10,227 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T09:36:10,227 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-15T09:36:10,228 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-15T09:36:10,228 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-15T09:36:10,229 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T09:36:10,229 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-15T09:36:10,229 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T09:36:10,229 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-15T09:36:11,770 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-15T09:36:12,677 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T09:36:12,679 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:12,704 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:12,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:12,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:16,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39767 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T09:36:16,838 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-15T09:36:16,838 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-15T09:36:16,842 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-15T09:36:16,842 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc. 2024-11-15T09:36:16,858 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:36:16,862 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:36:16,863 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:36:16,863 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:36:16,863 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:36:16,863 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6030d470{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:36:16,864 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45d50f98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:36:16,967 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@495a6aea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/java.io.tmpdir/jetty-localhost-35423-hadoop-hdfs-3_4_1-tests_jar-_-any-1307214160971053946/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:36:16,967 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5bf227cc{HTTP/1.1, (http/1.1)}{localhost:35423} 2024-11-15T09:36:16,967 INFO [Time-limited test {}] server.Server(415): Started @123400ms 2024-11-15T09:36:16,969 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:36:17,010 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:36:17,014 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:36:17,015 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:36:17,015 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:36:17,015 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:36:17,015 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ca82099{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:36:17,016 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ed35b1e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:36:17,118 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1196c8fc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/java.io.tmpdir/jetty-localhost-42121-hadoop-hdfs-3_4_1-tests_jar-_-any-17638658530146225207/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:36:17,119 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5580c33e{HTTP/1.1, (http/1.1)}{localhost:42121} 2024-11-15T09:36:17,119 INFO [Time-limited test {}] server.Server(415): Started @123552ms 2024-11-15T09:36:17,120 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:36:17,161 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:36:17,165 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:36:17,166 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:36:17,166 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:36:17,166 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T09:36:17,166 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fe58b15{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:36:17,167 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5455501c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:36:17,272 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5e08dd81{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/java.io.tmpdir/jetty-localhost-36905-hadoop-hdfs-3_4_1-tests_jar-_-any-3307205078712941991/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:36:17,273 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@178f342a{HTTP/1.1, (http/1.1)}{localhost:36905} 2024-11-15T09:36:17,273 INFO [Time-limited test {}] server.Server(415): Started @123706ms 2024-11-15T09:36:17,274 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:36:18,328 WARN [Thread-867 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data6/current/BP-1651719749-172.17.0.2-1731663362998/current, will proceed with Du for space computation calculation, 2024-11-15T09:36:18,328 WARN [Thread-866 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data5/current/BP-1651719749-172.17.0.2-1731663362998/current, will proceed with Du for space computation calculation, 2024-11-15T09:36:18,347 WARN [Thread-807 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T09:36:18,350 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd5041e10034b0095 with lease ID 0xcd48484be83b2094: Processing first storage report for DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8 from datanode DatanodeRegistration(127.0.0.1:33147, datanodeUuid=4397d9d1-004a-4c14-84eb-4dc6d5e87712, infoPort=37169, infoSecurePort=0, ipcPort=37265, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998) 2024-11-15T09:36:18,350 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd5041e10034b0095 with lease ID 0xcd48484be83b2094: from storage DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8 node DatanodeRegistration(127.0.0.1:33147, datanodeUuid=4397d9d1-004a-4c14-84eb-4dc6d5e87712, infoPort=37169, infoSecurePort=0, ipcPort=37265, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:36:18,350 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd5041e10034b0095 with lease ID 0xcd48484be83b2094: Processing first storage report for DS-920053a4-7f4d-4a68-bd44-41cc048fb376 from datanode DatanodeRegistration(127.0.0.1:33147, datanodeUuid=4397d9d1-004a-4c14-84eb-4dc6d5e87712, infoPort=37169, infoSecurePort=0, ipcPort=37265, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998) 2024-11-15T09:36:18,350 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd5041e10034b0095 with lease ID 0xcd48484be83b2094: from storage DS-920053a4-7f4d-4a68-bd44-41cc048fb376 node DatanodeRegistration(127.0.0.1:33147, datanodeUuid=4397d9d1-004a-4c14-84eb-4dc6d5e87712, infoPort=37169, infoSecurePort=0, ipcPort=37265, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:36:18,498 WARN [Thread-877 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data7/current/BP-1651719749-172.17.0.2-1731663362998/current, will proceed with Du for space computation calculation, 2024-11-15T09:36:18,498 WARN [Thread-878 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data8/current/BP-1651719749-172.17.0.2-1731663362998/current, will proceed with Du for space computation calculation, 2024-11-15T09:36:18,515 WARN [Thread-829 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T09:36:18,517 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x463489a17d34dc97 with lease ID 0xcd48484be83b2095: Processing first storage report for DS-5628fe08-03eb-4269-9471-5517e445cd32 from datanode DatanodeRegistration(127.0.0.1:40111, datanodeUuid=467f7591-42e3-4630-a5a7-f2686c43b608, infoPort=38885, infoSecurePort=0, ipcPort=39247, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998) 2024-11-15T09:36:18,518 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x463489a17d34dc97 with lease ID 0xcd48484be83b2095: from storage DS-5628fe08-03eb-4269-9471-5517e445cd32 node DatanodeRegistration(127.0.0.1:40111, datanodeUuid=467f7591-42e3-4630-a5a7-f2686c43b608, infoPort=38885, infoSecurePort=0, ipcPort=39247, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:36:18,518 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x463489a17d34dc97 with lease ID 0xcd48484be83b2095: Processing first storage report for DS-40509a71-5ddf-40ce-aae1-141e409114e7 from datanode DatanodeRegistration(127.0.0.1:40111, datanodeUuid=467f7591-42e3-4630-a5a7-f2686c43b608, infoPort=38885, infoSecurePort=0, ipcPort=39247, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998) 2024-11-15T09:36:18,518 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x463489a17d34dc97 with lease ID 0xcd48484be83b2095: from storage DS-40509a71-5ddf-40ce-aae1-141e409114e7 node DatanodeRegistration(127.0.0.1:40111, datanodeUuid=467f7591-42e3-4630-a5a7-f2686c43b608, infoPort=38885, infoSecurePort=0, ipcPort=39247, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:36:18,551 WARN [Thread-888 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data9/current/BP-1651719749-172.17.0.2-1731663362998/current, will proceed with Du for space computation calculation, 2024-11-15T09:36:18,551 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data10/current/BP-1651719749-172.17.0.2-1731663362998/current, will proceed with Du for space computation calculation, 2024-11-15T09:36:18,570 WARN [Thread-851 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T09:36:18,572 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x23fdfce866462d4d with lease ID 0xcd48484be83b2096: Processing first storage report for DS-41528752-22d9-496f-8014-f74b1cb7c0b3 from datanode DatanodeRegistration(127.0.0.1:44981, datanodeUuid=f642bebc-a513-4a80-b479-1ba339126817, infoPort=37387, infoSecurePort=0, ipcPort=45563, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998) 2024-11-15T09:36:18,573 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x23fdfce866462d4d with lease ID 0xcd48484be83b2096: from storage DS-41528752-22d9-496f-8014-f74b1cb7c0b3 node DatanodeRegistration(127.0.0.1:44981, datanodeUuid=f642bebc-a513-4a80-b479-1ba339126817, infoPort=37387, infoSecurePort=0, ipcPort=45563, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:36:18,573 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x23fdfce866462d4d with lease ID 0xcd48484be83b2096: Processing first storage report for DS-5729344f-60b2-46a5-91da-01b3ec50dad3 from datanode DatanodeRegistration(127.0.0.1:44981, datanodeUuid=f642bebc-a513-4a80-b479-1ba339126817, infoPort=37387, infoSecurePort=0, ipcPort=45563, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998) 2024-11-15T09:36:18,573 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x23fdfce866462d4d with lease ID 0xcd48484be83b2096: from storage DS-5729344f-60b2-46a5-91da-01b3ec50dad3 node DatanodeRegistration(127.0.0.1:44981, datanodeUuid=f642bebc-a513-4a80-b479-1ba339126817, infoPort=37387, infoSecurePort=0, ipcPort=45563, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:36:18,617 WARN [ResponseProcessor for block BP-1651719749-172.17.0.2-1731663362998:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1651719749-172.17.0.2-1731663362998:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:18,617 WARN [ResponseProcessor for block BP-1651719749-172.17.0.2-1731663362998:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1651719749-172.17.0.2-1731663362998:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
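The EOFException warnings above are the first client-side symptom of the event this TestLogRolling run exercises: a DataNode that sat in active WAL write pipelines has gone away. The log does not include the test code itself, so the following is only a minimal, hypothetical sketch of how a mini-cluster test can stop a DataNode to provoke exactly these pipeline failures. HBaseTestingUtil#getDFSCluster, MiniDFSCluster#stopDataNode and MiniDFSCluster#restartDataNode are assumed to exist as in HBase branch-3 and Hadoop 3.4; none of this is taken from the output above.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DatanodeDeathSketch {
  /** Stop one DataNode abruptly so open HDFS write pipelines through it break. */
  public static MiniDFSCluster.DataNodeProperties stopFirstDatanode(HBaseTestingUtil util)
      throws Exception {
    MiniDFSCluster dfs = util.getDFSCluster();
    // stopDataNode(0) halts the first DataNode without closing its streams;
    // WAL writers holding pipelines through it then see the EOFException and
    // "datanode ... is bad" warnings recorded in this log.
    return dfs.stopDataNode(0);
  }

  /** Bring the stopped node back later so block replication can heal. */
  public static void restart(HBaseTestingUtil util, MiniDFSCluster.DataNodeProperties dn)
      throws Exception {
    util.getDFSCluster().restartDataNode(dn, true);
  }
}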
2024-11-15T09:36:18,618 WARN [ResponseProcessor for block BP-1651719749-172.17.0.2-1731663362998:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1651719749-172.17.0.2-1731663362998:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1651719749-172.17.0.2-1731663362998:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:18,618 WARN [DataStreamer for file /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 block BP-1651719749-172.17.0.2-1731663362998:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK], DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]) is bad. 2024-11-15T09:36:18,617 WARN [ResponseProcessor for block BP-1651719749-172.17.0.2-1731663362998:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1651719749-172.17.0.2-1731663362998:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:18,618 WARN [DataStreamer for file /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/WALs/791f12959b23,39767,1731663365100/791f12959b23%2C39767%2C1731663365100.1731663365457 block BP-1651719749-172.17.0.2-1731663362998:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK], DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]) is bad. 2024-11-15T09:36:18,619 WARN [DataStreamer for file /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663365943 block BP-1651719749-172.17.0.2-1731663362998:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK], DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]) is bad. 
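The "Error Recovery ... is bad" entries above show the HDFS client marking the stopped DataNode bad and rebuilding each WAL pipeline around the surviving replica instead of failing the write outright. Whether DataStreamer asks for a replacement DataNode or simply continues with fewer nodes is controlled by the client-side replace-datanode-on-failure settings. The sketch below lists those knobs with illustrative values; the key names are standard HDFS client configuration, but the values shown are assumptions for a small test cluster, not settings read from this log.

import org.apache.hadoop.conf.Configuration;

public class PipelineFailureConfigSketch {
  /** Illustrative client settings that shape DataStreamer pipeline recovery. */
  public static Configuration pipelineRecoveryConf() {
    Configuration conf = new Configuration();
    // Permit replacing a failed DataNode inside an open write pipeline at all.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT only requests a replacement when replication and pipeline size
    // warrant it; a two-node test pipeline usually just continues on the
    // surviving node, as the entries above show.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // With best-effort enabled the write is not aborted merely because no
    // replacement DataNode could be found.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}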
2024-11-15T09:36:18,619 WARN [PacketResponder: BP-1651719749-172.17.0.2-1731663362998:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38481] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:18,620 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-599851476_22 at /127.0.0.1:38046 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:45915:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38046 dst: /127.0.0.1:45915 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T09:36:18,619 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:33382 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38481:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33382 dst: /127.0.0.1:38481 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:18,619 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-599851476_22 at /127.0.0.1:33430 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:38481:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33430 dst: /127.0.0.1:38481 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:18,620 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:38018 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45915:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38018 dst: /127.0.0.1:45915 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T09:36:18,621 WARN [DataStreamer for file /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta block BP-1651719749-172.17.0.2-1731663362998:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK], DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]) is bad. 2024-11-15T09:36:18,621 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1183229914_22 at /127.0.0.1:37968 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45915:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37968 dst: /127.0.0.1:45915 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:18,621 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:33398 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38481:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33398 dst: /127.0.0.1:38481 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] 
at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:18,622 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1183229914_22 at /127.0.0.1:33350 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38481:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33350 dst: /127.0.0.1:38481 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:18,623 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:38024 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45915:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38024 dst: /127.0.0.1:45915 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:18,626 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@597807df{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:36:18,626 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@401bd933{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:36:18,626 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:36:18,627 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f7f19bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:36:18,627 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74fcfaad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/hadoop.log.dir/,STOPPED} 2024-11-15T09:36:18,628 WARN [BP-1651719749-172.17.0.2-1731663362998 heartbeating to localhost/127.0.0.1:42831 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:36:18,628 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T09:36:18,628 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:36:18,628 WARN [BP-1651719749-172.17.0.2-1731663362998 heartbeating to localhost/127.0.0.1:42831 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1651719749-172.17.0.2-1731663362998 (Datanode Uuid ddc2173d-f513-4ad4-ac24-8be0bc5778af) service to localhost/127.0.0.1:42831 2024-11-15T09:36:18,628 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data3/current/BP-1651719749-172.17.0.2-1731663362998 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:36:18,629 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data4/current/BP-1651719749-172.17.0.2-1731663362998 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:36:18,629 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:36:18,632 WARN [DataStreamer for file /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta block BP-1651719749-172.17.0.2-1731663362998:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:18,632 WARN [DataStreamer for file /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/WALs/791f12959b23,39767,1731663365100/791f12959b23%2C39767%2C1731663365100.1731663365457 block BP-1651719749-172.17.0.2-1731663362998:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:18,632 WARN [DataStreamer for file /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663365943 block BP-1651719749-172.17.0.2-1731663362998:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:18,632 WARN [DataStreamer for file /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 block BP-1651719749-172.17.0.2-1731663362998:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:18,637 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d327fd2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:36:18,638 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4c80aceb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:36:18,638 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:36:18,638 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2cb9bebc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:36:18,638 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68a89b56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/hadoop.log.dir/,STOPPED} 2024-11-15T09:36:18,639 WARN [BP-1651719749-172.17.0.2-1731663362998 heartbeating to localhost/127.0.0.1:42831 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:36:18,639 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
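With both DataNodes of the original pipelines gone, createBlockOutputStream can only report "Connection refused" above, so the old WAL blocks cannot be repaired in place; in the entries that follow, the region server's log roller reacts by rolling onto a fresh WAL file backed by the surviving DataNodes. The sketch below shows, under assumptions, how test code can force such a roll and read back the new file name. HRegionServer#getWAL, WAL#rollWriter(boolean) and AbstractFSWAL#getCurrentFileName are assumed to be available as in HBase branch-3; this is not the test's own code.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL;
import org.apache.hadoop.hbase.wal.WAL;

public class ForceWalRollSketch {
  /** Roll the region's WAL onto a fresh file and return the new file's path. */
  public static Path rollAndGetCurrentFile(HRegionServer rs, RegionInfo region) throws Exception {
    WAL wal = rs.getWAL(region);
    // Close the current writer and open a new one; with the old pipeline's
    // DataNodes down, the new file is placed on healthy nodes, which is what
    // the "Rolled WAL ... new WAL ..." entry below records.
    wal.rollWriter(true);
    // The test's "log.getCurrentFileName()" output corresponds to this call.
    return ((AbstractFSWAL<?>) wal).getCurrentFileName();
  }
}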
2024-11-15T09:36:18,639 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:36:18,639 WARN [BP-1651719749-172.17.0.2-1731663362998 heartbeating to localhost/127.0.0.1:42831 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1651719749-172.17.0.2-1731663362998 (Datanode Uuid abaf1894-c114-4357-953a-ec1e5c1e419f) service to localhost/127.0.0.1:42831 2024-11-15T09:36:18,640 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data1/current/BP-1651719749-172.17.0.2-1731663362998 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:36:18,640 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data2/current/BP-1651719749-172.17.0.2-1731663362998 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:36:18,640 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:36:18,644 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc., hostname=791f12959b23,46093,1731663365267, seqNum=2] 2024-11-15T09:36:18,646 ERROR [FSHLog-0-hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54-prefix:791f12959b23,46093,1731663365267 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:18,646 WARN [FSHLog-0-hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54-prefix:791f12959b23,46093,1731663365267 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:18,646 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 791f12959b23%2C46093%2C1731663365267:(num 1731663365943) roll requested 2024-11-15T09:36:18,647 INFO [regionserver/791f12959b23:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C46093%2C1731663365267.1731663378646 2024-11-15T09:36:18,650 WARN [Thread-899 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:18,650 WARN [Thread-899 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK], DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]) is bad. 2024-11-15T09:36:18,650 WARN [Thread-899 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741838_1018 2024-11-15T09:36:18,652 WARN [Thread-899 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK] 2024-11-15T09:36:18,659 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:18,659 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:18,659 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:18,659 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:18,659 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:18,659 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663365943 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663378646 2024-11-15T09:36:18,660 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:18,660 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:18,661 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-15T09:36:18,662 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-15T09:36:18,662 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663365943 2024-11-15T09:36:18,665 WARN [IPC Server handler 3 on default port 42831 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663365943 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009 2024-11-15T09:36:18,669 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663365943 after 4ms 2024-11-15T09:36:18,677 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38885:38885),(127.0.0.1/127.0.0.1:37387:37387)] 2024-11-15T09:36:18,677 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663365943 is not closed yet, will try archiving it next time 2024-11-15T09:36:18,758 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:19,431 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:20,677 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:20,678 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663378646 2024-11-15T09:36:20,679 WARN [ResponseProcessor for block BP-1651719749-172.17.0.2-1731663362998:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1651719749-172.17.0.2-1731663362998:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:36:20,679 WARN [DataStreamer for file /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663378646 block BP-1651719749-172.17.0.2-1731663362998:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK], DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK]) is bad. 2024-11-15T09:36:20,680 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:34256 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:40111:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34256 dst: /127.0.0.1:40111 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T09:36:20,680 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:57164 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:44981:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57164 dst: /127.0.0.1:44981 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:20,721 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1196c8fc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:36:20,722 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5580c33e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:36:20,722 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:36:20,722 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ed35b1e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:36:20,722 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ca82099{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/hadoop.log.dir/,STOPPED} 2024-11-15T09:36:20,725 WARN [BP-1651719749-172.17.0.2-1731663362998 heartbeating to localhost/127.0.0.1:42831 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:36:20,725 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T09:36:20,725 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:36:20,725 WARN [BP-1651719749-172.17.0.2-1731663362998 heartbeating to localhost/127.0.0.1:42831 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1651719749-172.17.0.2-1731663362998 (Datanode Uuid 467f7591-42e3-4630-a5a7-f2686c43b608) service to localhost/127.0.0.1:42831 2024-11-15T09:36:20,725 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data7/current/BP-1651719749-172.17.0.2-1731663362998 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:36:20,725 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data8/current/BP-1651719749-172.17.0.2-1731663362998 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:36:20,725 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:36:20,758 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:21,432 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:22,670 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663365943 after 4008ms 2024-11-15T09:36:22,678 WARN [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. 
Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]] 2024-11-15T09:36:22,678 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:22,678 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 791f12959b23%2C46093%2C1731663365267:(num 1731663378646) roll requested 2024-11-15T09:36:22,678 INFO [regionserver/791f12959b23:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C46093%2C1731663365267.1731663382678 2024-11-15T09:36:22,681 WARN [Thread-908 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:22,681 WARN [Thread-908 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK], DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]) is bad. 2024-11-15T09:36:22,681 WARN [Thread-908 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741840_1022 2024-11-15T09:36:22,682 WARN [Thread-908 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK] 2024-11-15T09:36:22,684 WARN [Thread-908 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38481 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:22,684 WARN [Thread-908 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK], DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]) is bad. 2024-11-15T09:36:22,684 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:50900 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data6]'}, localName='127.0.0.1:33147', datanodeUuid='4397d9d1-004a-4c14-84eb-4dc6d5e87712', xmitsInProgress=0}:Exception transferring block BP-1651719749-172.17.0.2-1731663362998:blk_1073741841_1023 to mirror 127.0.0.1:38481 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:22,684 WARN [Thread-908 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741841_1023 2024-11-15T09:36:22,685 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:50900 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-15T09:36:22,685 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:50900 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:33147:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50900 dst: /127.0.0.1:33147 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:22,685 WARN [Thread-908 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK] 2024-11-15T09:36:22,688 WARN [Thread-908 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40111 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:22,688 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:34184 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741842_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data10]'}, localName='127.0.0.1:44981', datanodeUuid='f642bebc-a513-4a80-b479-1ba339126817', xmitsInProgress=0}:Exception transferring block BP-1651719749-172.17.0.2-1731663362998:blk_1073741842_1024 to mirror 127.0.0.1:40111 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:22,688 WARN [Thread-908 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK]) is bad. 2024-11-15T09:36:22,688 WARN [Thread-908 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741842_1024 2024-11-15T09:36:22,688 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:34184 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741842_1024] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-15T09:36:22,688 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:34184 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:44981:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34184 dst: /127.0.0.1:44981 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T09:36:22,689 WARN [Thread-908 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK] 2024-11-15T09:36:22,695 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:22,696 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:22,696 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:22,696 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:22,696 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:22,696 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663378646 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663382678 2024-11-15T09:36:22,697 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37169:37169),(127.0.0.1/127.0.0.1:37387:37387)] 2024-11-15T09:36:22,697 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663365943 is not closed yet, will try archiving it next time 2024-11-15T09:36:22,697 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663378646 is not closed yet, will try archiving it next time 2024-11-15T09:36:22,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44981 is added to blk_1073741839_1021 (size=2431) 2024-11-15T09:36:22,730 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T09:36:22,758 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:23,099 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663365943 is not closed yet, will try archiving it next time 2024-11-15T09:36:23,432 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:24,697 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:24,735 WARN [ResponseProcessor for block BP-1651719749-172.17.0.2-1731663362998:blk_1073741843_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1651719749-172.17.0.2-1731663362998:blk_1073741843_1025 java.io.IOException: Bad response ERROR for BP-1651719749-172.17.0.2-1731663362998:blk_1073741843_1025 from datanode DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:24,735 WARN [DataStreamer for file /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663382678 block BP-1651719749-172.17.0.2-1731663362998:blk_1073741843_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK], DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]) is bad. 2024-11-15T09:36:24,735 WARN [PacketResponder: BP-1651719749-172.17.0.2-1731663362998:blk_1073741843_1025, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44981] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:24,735 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:50904 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:33147:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50904 dst: /127.0.0.1:33147 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:24,736 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:34188 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:44981:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34188 dst: /127.0.0.1:44981 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:24,759 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:36:24,844 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5e08dd81{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:36:24,844 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@178f342a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:36:24,844 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:36:24,844 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5455501c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:36:24,845 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fe58b15{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/hadoop.log.dir/,STOPPED} 2024-11-15T09:36:24,846 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-15T09:36:24,846 WARN [BP-1651719749-172.17.0.2-1731663362998 heartbeating to localhost/127.0.0.1:42831 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:36:24,846 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:36:24,846 WARN [BP-1651719749-172.17.0.2-1731663362998 heartbeating to localhost/127.0.0.1:42831 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1651719749-172.17.0.2-1731663362998 (Datanode Uuid f642bebc-a513-4a80-b479-1ba339126817) service to localhost/127.0.0.1:42831 2024-11-15T09:36:24,847 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data9/current/BP-1651719749-172.17.0.2-1731663362998 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:36:24,847 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data10/current/BP-1651719749-172.17.0.2-1731663362998 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:36:24,848 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:36:24,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46093 {}] regionserver.HRegion(8855): Flush requested on 8712c3f1e53da8bd32464272fe8554cc 2024-11-15T09:36:24,859 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8712c3f1e53da8bd32464272fe8554cc 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T09:36:24,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/cc48e858c4ae4a9ca3e22a8ba671c169 is 1080, key is row0002/info:/1731663380727/Put/seqid=0 2024-11-15T09:36:24,881 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:24,881 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK], DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK]) is bad. 2024-11-15T09:36:24,881 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741844_1027 2024-11-15T09:36:24,882 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK] 2024-11-15T09:36:24,883 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:24,883 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK], DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]) is bad. 
2024-11-15T09:36:24,883 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741845_1028 2024-11-15T09:36:24,884 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK] 2024-11-15T09:36:24,886 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:24,886 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]) is bad. 2024-11-15T09:36:24,886 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741846_1029 2024-11-15T09:36:24,886 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK] 2024-11-15T09:36:24,889 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45915 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:36:24,889 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:50930 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data6]'}, localName='127.0.0.1:33147', datanodeUuid='4397d9d1-004a-4c14-84eb-4dc6d5e87712', xmitsInProgress=0}:Exception transferring block BP-1651719749-172.17.0.2-1731663362998:blk_1073741847_1030 to mirror 127.0.0.1:45915 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:24,889 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]) is bad. 2024-11-15T09:36:24,889 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741847_1030 2024-11-15T09:36:24,889 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:50930 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T09:36:24,889 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:50930 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:33147:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50930 dst: /127.0.0.1:33147 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:24,890 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK] 2024-11-15T09:36:24,890 WARN [IPC Server handler 0 on default port 42831 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T09:36:24,891 WARN [IPC Server handler 0 on default port 42831 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T09:36:24,891 WARN [IPC Server handler 0 on default port 42831 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T09:36:24,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741848_1031 (size=10347) 2024-11-15T09:36:25,295 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/cc48e858c4ae4a9ca3e22a8ba671c169 2024-11-15T09:36:25,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/cc48e858c4ae4a9ca3e22a8ba671c169 as hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/cc48e858c4ae4a9ca3e22a8ba671c169 2024-11-15T09:36:25,312 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/cc48e858c4ae4a9ca3e22a8ba671c169, entries=5, sequenceid=11, filesize=10.1 K 2024-11-15T09:36:25,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 8712c3f1e53da8bd32464272fe8554cc in 455ms, sequenceid=11, compaction requested=false 2024-11-15T09:36:25,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8712c3f1e53da8bd32464272fe8554cc: 2024-11-15T09:36:25,432 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:25,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46093 {}] regionserver.HRegion(8855): Flush requested on 8712c3f1e53da8bd32464272fe8554cc 2024-11-15T09:36:25,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8712c3f1e53da8bd32464272fe8554cc 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-15T09:36:25,504 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/315b5cbb927a4c1fb8af34a15cf5cb57 is 1080, key is row0007/info:/1731663384860/Put/seqid=0 2024-11-15T09:36:25,506 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:36:25,506 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK], DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]) is bad. 2024-11-15T09:36:25,506 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741849_1032 2024-11-15T09:36:25,507 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK] 2024-11-15T09:36:25,509 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44981 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:25,509 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:50962 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data6]'}, localName='127.0.0.1:33147', datanodeUuid='4397d9d1-004a-4c14-84eb-4dc6d5e87712', xmitsInProgress=0}:Exception transferring block BP-1651719749-172.17.0.2-1731663362998:blk_1073741850_1033 to mirror 127.0.0.1:44981 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T09:36:25,509 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK], DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]) is bad. 2024-11-15T09:36:25,509 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741850_1033 2024-11-15T09:36:25,509 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:50962 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T09:36:25,509 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:50962 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:33147:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50962 dst: /127.0.0.1:33147 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:25,510 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK] 2024-11-15T09:36:25,512 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45915 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:36:25,512 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:50976 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data6]'}, localName='127.0.0.1:33147', datanodeUuid='4397d9d1-004a-4c14-84eb-4dc6d5e87712', xmitsInProgress=0}:Exception transferring block BP-1651719749-172.17.0.2-1731663362998:blk_1073741851_1034 to mirror 127.0.0.1:45915 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:25,512 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]) is bad. 2024-11-15T09:36:25,512 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741851_1034 2024-11-15T09:36:25,512 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:50976 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T09:36:25,512 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:50976 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:33147:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50976 dst: /127.0.0.1:33147 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:25,513 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK] 2024-11-15T09:36:25,515 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40111 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:25,515 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:50982 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741852_1035] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data6]'}, localName='127.0.0.1:33147', datanodeUuid='4397d9d1-004a-4c14-84eb-4dc6d5e87712', xmitsInProgress=0}:Exception transferring block BP-1651719749-172.17.0.2-1731663362998:blk_1073741852_1035 to mirror 127.0.0.1:40111 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T09:36:25,515 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK], DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK]) is bad. 2024-11-15T09:36:25,515 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741852_1035 2024-11-15T09:36:25,515 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:50982 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741852_1035] {}] datanode.BlockReceiver(316): Block 1073741852 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T09:36:25,516 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:50982 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741852_1035] {}] datanode.DataXceiver(331): 127.0.0.1:33147:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50982 dst: /127.0.0.1:33147 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T09:36:25,516 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK] 2024-11-15T09:36:25,517 WARN [IPC Server handler 3 on default port 42831 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T09:36:25,517 WARN [IPC Server handler 3 on default port 42831 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T09:36:25,517 WARN [IPC Server handler 3 on default port 42831 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T09:36:25,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741853_1036 (size=12506) 2024-11-15T09:36:25,921 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/315b5cbb927a4c1fb8af34a15cf5cb57 2024-11-15T09:36:25,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/315b5cbb927a4c1fb8af34a15cf5cb57 as hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/315b5cbb927a4c1fb8af34a15cf5cb57 2024-11-15T09:36:25,935 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/315b5cbb927a4c1fb8af34a15cf5cb57, entries=7, sequenceid=24, filesize=12.2 K 2024-11-15T09:36:25,936 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 8712c3f1e53da8bd32464272fe8554cc in 436ms, sequenceid=24, compaction requested=false 2024-11-15T09:36:25,936 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8712c3f1e53da8bd32464272fe8554cc: 2024-11-15T09:36:25,936 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-15T09:36:25,936 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:36:25,936 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/315b5cbb927a4c1fb8af34a15cf5cb57 because midkey is the same as first or last row 2024-11-15T09:36:26,698 WARN [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK]] 2024-11-15T09:36:26,698 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:26,698 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 791f12959b23%2C46093%2C1731663365267:(num 1731663382678) roll requested 2024-11-15T09:36:26,699 INFO [regionserver/791f12959b23:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C46093%2C1731663365267.1731663386699 2024-11-15T09:36:26,703 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:26,703 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]) is bad. 
2024-11-15T09:36:26,703 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741854_1037 2024-11-15T09:36:26,704 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK] 2024-11-15T09:36:26,705 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:26,705 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK], DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK]) is bad. 2024-11-15T09:36:26,705 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741855_1038 2024-11-15T09:36:26,706 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK] 2024-11-15T09:36:26,707 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:36:26,707 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK], DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]) is bad. 2024-11-15T09:36:26,708 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741856_1039 2024-11-15T09:36:26,708 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK] 2024-11-15T09:36:26,710 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:26,710 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK], DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]) is bad. 
2024-11-15T09:36:26,710 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741857_1040 2024-11-15T09:36:26,710 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK] 2024-11-15T09:36:26,711 WARN [IPC Server handler 4 on default port 42831 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T09:36:26,711 WARN [IPC Server handler 4 on default port 42831 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T09:36:26,711 WARN [IPC Server handler 4 on default port 42831 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T09:36:26,714 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:26,714 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:26,714 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:26,714 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:26,714 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:26,715 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663382678 with entries=24, filesize=24.23 KB; new WAL /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663386699 2024-11-15T09:36:26,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741843_1026 (size=24824) 2024-11-15T09:36:26,718 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37169:37169)] 2024-11-15T09:36:26,718 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663365943 is not closed yet, will try archiving it next time 2024-11-15T09:36:26,718 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663382678 is not closed yet, 
will try archiving it next time 2024-11-15T09:36:26,718 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663378646 to hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/oldWALs/791f12959b23%2C46093%2C1731663365267.1731663378646 2024-11-15T09:36:26,759 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:26,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46093 {}] regionserver.HRegion(8855): Flush requested on 8712c3f1e53da8bd32464272fe8554cc 2024-11-15T09:36:26,923 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8712c3f1e53da8bd32464272fe8554cc 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-15T09:36:26,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/3ef269cba08048dfbd1f6c2bbe74faf9 is 1079, key is tmprow/info:/1731663386921/Put/seqid=0 2024-11-15T09:36:26,932 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:26,932 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK], DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]) is bad. 
2024-11-15T09:36:26,933 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741859_1042 2024-11-15T09:36:26,933 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK] 2024-11-15T09:36:26,937 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38481 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:26,937 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51024 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data6]'}, localName='127.0.0.1:33147', datanodeUuid='4397d9d1-004a-4c14-84eb-4dc6d5e87712', xmitsInProgress=0}:Exception transferring block BP-1651719749-172.17.0.2-1731663362998:blk_1073741860_1043 to mirror 127.0.0.1:38481 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:26,937 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK], DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]) is bad. 
2024-11-15T09:36:26,937 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741860_1043 2024-11-15T09:36:26,937 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51024 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T09:36:26,937 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51024 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:33147:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51024 dst: /127.0.0.1:33147 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:26,938 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK] 2024-11-15T09:36:26,940 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:26,940 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK], DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK]) is bad. 
2024-11-15T09:36:26,940 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741861_1044 2024-11-15T09:36:26,941 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK] 2024-11-15T09:36:26,943 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:26,943 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]) is bad. 
2024-11-15T09:36:26,943 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741862_1045 2024-11-15T09:36:26,943 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK] 2024-11-15T09:36:26,944 WARN [IPC Server handler 0 on default port 42831 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T09:36:26,944 WARN [IPC Server handler 0 on default port 42831 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T09:36:26,944 WARN [IPC Server handler 0 on default port 42831 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T09:36:26,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741863_1046 (size=6027) 2024-11-15T09:36:27,118 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663365943 is not closed yet, will try archiving it next time 2024-11-15T09:36:27,349 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/3ef269cba08048dfbd1f6c2bbe74faf9 2024-11-15T09:36:27,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/3ef269cba08048dfbd1f6c2bbe74faf9 as hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/3ef269cba08048dfbd1f6c2bbe74faf9 2024-11-15T09:36:27,367 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/3ef269cba08048dfbd1f6c2bbe74faf9, entries=1, sequenceid=34, filesize=5.9 K 2024-11-15T09:36:27,369 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 8712c3f1e53da8bd32464272fe8554cc in 446ms, sequenceid=34, compaction requested=true 2024-11-15T09:36:27,369 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8712c3f1e53da8bd32464272fe8554cc: 2024-11-15T09:36:27,369 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-15T09:36:27,369 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:36:27,369 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/315b5cbb927a4c1fb8af34a15cf5cb57 because midkey is the same as first or last row 2024-11-15T09:36:27,369 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8712c3f1e53da8bd32464272fe8554cc:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T09:36:27,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:36:27,370 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T09:36:27,371 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T09:36:27,371 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.HStore(1541): 8712c3f1e53da8bd32464272fe8554cc/info is initiating minor compaction (all files) 2024-11-15T09:36:27,371 INFO [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8712c3f1e53da8bd32464272fe8554cc/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc. 
2024-11-15T09:36:27,371 INFO [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/cc48e858c4ae4a9ca3e22a8ba671c169, hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/315b5cbb927a4c1fb8af34a15cf5cb57, hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/3ef269cba08048dfbd1f6c2bbe74faf9] into tmpdir=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp, totalSize=28.2 K 2024-11-15T09:36:27,372 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] compactions.Compactor(225): Compacting cc48e858c4ae4a9ca3e22a8ba671c169, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731663380727 2024-11-15T09:36:27,372 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] compactions.Compactor(225): Compacting 315b5cbb927a4c1fb8af34a15cf5cb57, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731663384860 2024-11-15T09:36:27,373 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3ef269cba08048dfbd1f6c2bbe74faf9, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731663386921 2024-11-15T09:36:27,389 INFO [RS:0;791f12959b23:46093-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8712c3f1e53da8bd32464272fe8554cc#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T09:36:27,389 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/86e7991e867c45078896a2d7df855b73 is 1080, key is row0002/info:/1731663380727/Put/seqid=0 2024-11-15T09:36:27,392 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38481 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:36:27,392 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51060 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741864_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data6]'}, localName='127.0.0.1:33147', datanodeUuid='4397d9d1-004a-4c14-84eb-4dc6d5e87712', xmitsInProgress=0}:Exception transferring block BP-1651719749-172.17.0.2-1731663362998:blk_1073741864_1047 to mirror 127.0.0.1:38481 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:27,392 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK], DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]) is bad. 2024-11-15T09:36:27,392 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51060 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741864_1047] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T09:36:27,392 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741864_1047 2024-11-15T09:36:27,393 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51060 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741864_1047] {}] datanode.DataXceiver(331): 127.0.0.1:33147:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51060 dst: /127.0.0.1:33147 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:27,393 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK] 2024-11-15T09:36:27,394 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:27,395 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]) is bad. 2024-11-15T09:36:27,395 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741865_1048 2024-11-15T09:36:27,395 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK] 2024-11-15T09:36:27,397 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:27,397 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK], DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK]) is bad. 2024-11-15T09:36:27,397 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741866_1049 2024-11-15T09:36:27,398 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK] 2024-11-15T09:36:27,401 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45915 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:27,401 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]) is bad. 2024-11-15T09:36:27,401 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741867_1050 2024-11-15T09:36:27,402 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51074 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741867_1050] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data6]'}, localName='127.0.0.1:33147', datanodeUuid='4397d9d1-004a-4c14-84eb-4dc6d5e87712', xmitsInProgress=0}:Exception transferring block BP-1651719749-172.17.0.2-1731663362998:blk_1073741867_1050 to mirror 127.0.0.1:45915 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:27,402 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK] 2024-11-15T09:36:27,403 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51074 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741867_1050] {}] datanode.BlockReceiver(316): Block 1073741867 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T09:36:27,403 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51074 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741867_1050] {}] datanode.DataXceiver(331): 127.0.0.1:33147:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51074 dst: /127.0.0.1:33147 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T09:36:27,403 WARN [IPC Server handler 4 on default port 42831 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T09:36:27,403 WARN [IPC Server handler 4 on default port 42831 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T09:36:27,404 WARN [IPC Server handler 4 on default port 42831 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T09:36:27,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741868_1051 (size=17994) 2024-11-15T09:36:27,433 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:27,815 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/86e7991e867c45078896a2d7df855b73 as hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/86e7991e867c45078896a2d7df855b73 2024-11-15T09:36:27,823 INFO [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8712c3f1e53da8bd32464272fe8554cc/info of 8712c3f1e53da8bd32464272fe8554cc into 86e7991e867c45078896a2d7df855b73(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
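The IPC Server warnings just above show the NameNode side of the same failure: with most test datanodes stopped it cannot find a second live DISK storage, so new blocks are allocated with a single replica, while the master's existing WAL stream, whose only remaining pipeline node (127.0.0.1:45915) is down, cannot be recovered at all and reports "All datanodes ... are bad. Aborting...". The compaction writer, by contrast, lands its HFile on the surviving datanode and commits it. For reference, a hedged sketch of the standard HDFS client settings that govern how DFSOutputStream replaces failed pipeline nodes (the values are illustrative, not what this test run uses, and none of them can save a pipeline once every node in it is gone):

    // Sketch of standard HDFS client pipeline-recovery knobs (values are examples only).
    import org.apache.hadoop.conf.Configuration;

    public class PipelineRecoveryConfSketch {
      static Configuration pipelineRecoveryConf() {
        Configuration conf = new Configuration();
        // Let the client try to replace a failed datanode in an existing write pipeline.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT only insists on a replacement for larger pipelines; NEVER and ALWAYS are the alternatives.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // If no replacement can be found, keep writing with the surviving nodes instead of failing the stream.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }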
2024-11-15T09:36:27,823 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8712c3f1e53da8bd32464272fe8554cc: 2024-11-15T09:36:27,823 INFO [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc., storeName=8712c3f1e53da8bd32464272fe8554cc/info, priority=13, startTime=1731663387369; duration=0sec 2024-11-15T09:36:27,824 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-15T09:36:27,824 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:36:27,824 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/86e7991e867c45078896a2d7df855b73 because midkey is the same as first or last row 2024-11-15T09:36:27,824 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-15T09:36:27,824 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:36:27,824 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/86e7991e867c45078896a2d7df855b73 because midkey is the same as first or last row 2024-11-15T09:36:27,824 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-15T09:36:27,824 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:36:27,824 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/86e7991e867c45078896a2d7df855b73 because midkey is the same as first or last row 2024-11-15T09:36:27,824 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:36:27,824 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8712c3f1e53da8bd32464272fe8554cc:info 2024-11-15T09:36:28,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46093 {}] regionserver.HRegion(8855): Flush requested on 8712c3f1e53da8bd32464272fe8554cc 2024-11-15T09:36:28,343 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8712c3f1e53da8bd32464272fe8554cc 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-15T09:36:28,350 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/81da34641bc445d5b12062c903c0c9b8 is 1079, key is tmprow/info:/1731663388342/Put/seqid=0 2024-11-15T09:36:28,352 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:28,352 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK], DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]) is bad. 2024-11-15T09:36:28,352 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741869_1052 2024-11-15T09:36:28,353 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK] 2024-11-15T09:36:28,354 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:36:28,354 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK], DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]) is bad. 2024-11-15T09:36:28,355 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741870_1053 2024-11-15T09:36:28,355 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK] 2024-11-15T09:36:28,356 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:28,357 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]) is bad. 2024-11-15T09:36:28,357 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741871_1054 2024-11-15T09:36:28,357 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7d263139[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33147, datanodeUuid=4397d9d1-004a-4c14-84eb-4dc6d5e87712, infoPort=37169, infoSecurePort=0, ipcPort=37265, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998):Failed to transfer BP-1651719749-172.17.0.2-1731663362998:blk_1073741848_1031 to 127.0.0.1:38481 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:28,357 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3b7ed574[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33147, datanodeUuid=4397d9d1-004a-4c14-84eb-4dc6d5e87712, infoPort=37169, infoSecurePort=0, ipcPort=37265, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998):Failed to transfer BP-1651719749-172.17.0.2-1731663362998:blk_1073741853_1036 to 127.0.0.1:40111 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:28,357 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK] 2024-11-15T09:36:28,359 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:28,359 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK], DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK]) is bad. 
2024-11-15T09:36:28,359 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741872_1055 2024-11-15T09:36:28,360 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK] 2024-11-15T09:36:28,360 WARN [IPC Server handler 2 on default port 42831 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T09:36:28,360 WARN [IPC Server handler 2 on default port 42831 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T09:36:28,360 WARN [IPC Server handler 2 on default port 42831 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T09:36:28,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741873_1056 (size=6027) 2024-11-15T09:36:28,364 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/81da34641bc445d5b12062c903c0c9b8 2024-11-15T09:36:28,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/81da34641bc445d5b12062c903c0c9b8 as hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/81da34641bc445d5b12062c903c0c9b8 2024-11-15T09:36:28,377 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/81da34641bc445d5b12062c903c0c9b8, entries=1, sequenceid=45, filesize=5.9 K 2024-11-15T09:36:28,378 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 8712c3f1e53da8bd32464272fe8554cc in 35ms, sequenceid=45, compaction requested=false 2024-11-15T09:36:28,379 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
8712c3f1e53da8bd32464272fe8554cc: 2024-11-15T09:36:28,379 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-15T09:36:28,379 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:36:28,379 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/86e7991e867c45078896a2d7df855b73 because midkey is the same as first or last row 2024-11-15T09:36:28,718 WARN [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK]] 2024-11-15T09:36:28,719 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:28,719 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 791f12959b23%2C46093%2C1731663365267:(num 1731663386699) roll requested 2024-11-15T09:36:28,719 INFO [regionserver/791f12959b23:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C46093%2C1731663365267.1731663388719 2024-11-15T09:36:28,723 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
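The entries just above are the regionserver reacting to that degraded pipeline: FSHLog sees only 1 live replica against a minimum of 2, requests a WAL roll, and the roll completes a little further down ("Rolled WAL ... entries=15, filesize=13.26 KB") on a single-datanode pipeline. A hedged sketch of the HBase settings behind that check follows; the keys are read by FSHLog, but the values shown are assumptions and the defaults vary by version:

    // Sketch of the WAL low-replication roll tuning; values shown are illustrative.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalLowReplicationSketch {
      static Configuration walRollTuning() {
        Configuration conf = HBaseConfiguration.create();
        // Request a roll when the WAL pipeline's live replication drops below this value
        // (by default the filesystem's replication, 2 in this mini cluster).
        conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
        // Stop requesting low-replication rolls after this many consecutive attempts.
        conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
        return conf;
      }
    }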
2024-11-15T09:36:28,723 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK], DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK]) is bad. 2024-11-15T09:36:28,723 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741874_1057 2024-11-15T09:36:28,724 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK] 2024-11-15T09:36:28,726 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:28,726 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]) is bad. 2024-11-15T09:36:28,726 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741875_1058 2024-11-15T09:36:28,727 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK] 2024-11-15T09:36:28,729 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45915 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:36:28,729 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51112 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741876_1059] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data6]'}, localName='127.0.0.1:33147', datanodeUuid='4397d9d1-004a-4c14-84eb-4dc6d5e87712', xmitsInProgress=0}:Exception transferring block BP-1651719749-172.17.0.2-1731663362998:blk_1073741876_1059 to mirror 127.0.0.1:45915 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:28,730 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]) is bad. 2024-11-15T09:36:28,730 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741876_1059 2024-11-15T09:36:28,730 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51112 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741876_1059] {}] datanode.BlockReceiver(316): Block 1073741876 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-15T09:36:28,730 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51112 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741876_1059] {}] datanode.DataXceiver(331): 127.0.0.1:33147:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51112 dst: /127.0.0.1:33147 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:28,731 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK] 2024-11-15T09:36:28,732 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:28,733 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK], DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]) is bad. 
2024-11-15T09:36:28,733 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741877_1060 2024-11-15T09:36:28,734 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK] 2024-11-15T09:36:28,735 WARN [IPC Server handler 0 on default port 42831 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T09:36:28,735 WARN [IPC Server handler 0 on default port 42831 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T09:36:28,735 WARN [IPC Server handler 0 on default port 42831 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T09:36:28,738 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:28,738 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:28,738 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:28,738 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:28,738 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:28,739 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663386699 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663388719 2024-11-15T09:36:28,739 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37169:37169)] 2024-11-15T09:36:28,739 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663365943 is not closed yet, will try archiving it next time 2024-11-15T09:36:28,739 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663386699 is not closed yet, will try archiving it next time 2024-11-15T09:36:28,739 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663382678 to hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/oldWALs/791f12959b23%2C46093%2C1731663365267.1731663382678 2024-11-15T09:36:28,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741858_1041 (size=13591) 2024-11-15T09:36:28,759 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:29,141 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663365943 is not closed yet, will try archiving it next time 2024-11-15T09:36:29,350 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7d263139[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33147, datanodeUuid=4397d9d1-004a-4c14-84eb-4dc6d5e87712, infoPort=37169, infoSecurePort=0, ipcPort=37265, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998):Failed to transfer BP-1651719749-172.17.0.2-1731663362998:blk_1073741863_1046 to 127.0.0.1:44981 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:29,350 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3b7ed574[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33147, datanodeUuid=4397d9d1-004a-4c14-84eb-4dc6d5e87712, infoPort=37169, infoSecurePort=0, ipcPort=37265, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998):Failed to transfer BP-1651719749-172.17.0.2-1731663362998:blk_1073741843_1026 to 127.0.0.1:40111 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:29,433 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:29,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46093 {}] regionserver.HRegion(8855): Flush requested on 8712c3f1e53da8bd32464272fe8554cc 2024-11-15T09:36:29,763 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8712c3f1e53da8bd32464272fe8554cc 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-15T09:36:29,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/a715c70c255147c8802c2a6d80827ec0 is 1079, key is tmprow/info:/1731663389761/Put/seqid=0 2024-11-15T09:36:29,772 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51124 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data6]'}, localName='127.0.0.1:33147', datanodeUuid='4397d9d1-004a-4c14-84eb-4dc6d5e87712', xmitsInProgress=0}:Exception transferring block BP-1651719749-172.17.0.2-1731663362998:blk_1073741879_1062 to mirror 127.0.0.1:40111 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:29,773 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40111 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:29,773 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK], DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK]) is bad. 2024-11-15T09:36:29,773 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51124 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T09:36:29,773 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741879_1062 2024-11-15T09:36:29,773 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51124 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:33147:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51124 dst: /127.0.0.1:33147 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:29,774 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK] 2024-11-15T09:36:29,776 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38481 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:29,776 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51128 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data6]'}, localName='127.0.0.1:33147', datanodeUuid='4397d9d1-004a-4c14-84eb-4dc6d5e87712', xmitsInProgress=0}:Exception transferring block BP-1651719749-172.17.0.2-1731663362998:blk_1073741880_1063 to mirror 127.0.0.1:38481 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T09:36:29,776 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK], DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]) is bad. 2024-11-15T09:36:29,776 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741880_1063 2024-11-15T09:36:29,776 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51128 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T09:36:29,776 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51128 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:33147:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51128 dst: /127.0.0.1:33147 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:29,777 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK] 2024-11-15T09:36:29,778 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:36:29,779 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]) is bad. 2024-11-15T09:36:29,779 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741881_1064 2024-11-15T09:36:29,779 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK] 2024-11-15T09:36:29,782 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45915 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:29,782 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51136 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741882_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data6]'}, localName='127.0.0.1:33147', datanodeUuid='4397d9d1-004a-4c14-84eb-4dc6d5e87712', xmitsInProgress=0}:Exception transferring block BP-1651719749-172.17.0.2-1731663362998:blk_1073741882_1065 to mirror 127.0.0.1:45915 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T09:36:29,783 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]) is bad. 2024-11-15T09:36:29,783 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741882_1065 2024-11-15T09:36:29,783 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51136 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741882_1065] {}] datanode.BlockReceiver(316): Block 1073741882 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T09:36:29,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:51136 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741882_1065] {}] datanode.DataXceiver(331): 127.0.0.1:33147:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51136 dst: /127.0.0.1:33147 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T09:36:29,783 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK] 2024-11-15T09:36:29,784 WARN [IPC Server handler 2 on default port 42831 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T09:36:29,784 WARN [IPC Server handler 2 on default port 42831 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T09:36:29,784 WARN [IPC Server handler 2 on default port 42831 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T09:36:29,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741883_1066 (size=6027) 2024-11-15T09:36:30,189 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/a715c70c255147c8802c2a6d80827ec0 2024-11-15T09:36:30,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/a715c70c255147c8802c2a6d80827ec0 as hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/a715c70c255147c8802c2a6d80827ec0 2024-11-15T09:36:30,202 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/a715c70c255147c8802c2a6d80827ec0, entries=1, sequenceid=55, filesize=5.9 K 2024-11-15T09:36:30,204 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 8712c3f1e53da8bd32464272fe8554cc in 441ms, sequenceid=55, compaction requested=true 2024-11-15T09:36:30,204 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8712c3f1e53da8bd32464272fe8554cc: 
2024-11-15T09:36:30,204 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-15T09:36:30,204 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:36:30,204 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/86e7991e867c45078896a2d7df855b73 because midkey is the same as first or last row 2024-11-15T09:36:30,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8712c3f1e53da8bd32464272fe8554cc:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T09:36:30,204 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T09:36:30,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:36:30,206 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T09:36:30,206 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.HStore(1541): 8712c3f1e53da8bd32464272fe8554cc/info is initiating minor compaction (all files) 2024-11-15T09:36:30,206 INFO [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8712c3f1e53da8bd32464272fe8554cc/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc. 
2024-11-15T09:36:30,206 INFO [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/86e7991e867c45078896a2d7df855b73, hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/81da34641bc445d5b12062c903c0c9b8, hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/a715c70c255147c8802c2a6d80827ec0] into tmpdir=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp, totalSize=29.3 K 2024-11-15T09:36:30,207 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] compactions.Compactor(225): Compacting 86e7991e867c45078896a2d7df855b73, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731663380727 2024-11-15T09:36:30,207 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] compactions.Compactor(225): Compacting 81da34641bc445d5b12062c903c0c9b8, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731663388342 2024-11-15T09:36:30,207 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] compactions.Compactor(225): Compacting a715c70c255147c8802c2a6d80827ec0, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731663389761 2024-11-15T09:36:30,235 INFO [RS:0;791f12959b23:46093-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8712c3f1e53da8bd32464272fe8554cc#info#compaction#24 average throughput is 1.03 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T09:36:30,236 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/9fba1e34001c458f91396908fc39056b is 1080, key is row0002/info:/1731663380727/Put/seqid=0 2024-11-15T09:36:30,238 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:36:30,238 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK], DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]) is bad. 2024-11-15T09:36:30,238 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741884_1067 2024-11-15T09:36:30,239 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK] 2024-11-15T09:36:30,240 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:30,241 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK], DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK]) is bad. 2024-11-15T09:36:30,241 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741885_1068 2024-11-15T09:36:30,241 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK] 2024-11-15T09:36:30,243 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:30,243 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]) is bad. 2024-11-15T09:36:30,243 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741886_1069 2024-11-15T09:36:30,244 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK] 2024-11-15T09:36:30,246 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:30,246 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK], DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]) is bad. 
2024-11-15T09:36:30,246 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741887_1070 2024-11-15T09:36:30,247 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38481,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK] 2024-11-15T09:36:30,248 WARN [IPC Server handler 2 on default port 42831 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T09:36:30,248 WARN [IPC Server handler 2 on default port 42831 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T09:36:30,248 WARN [IPC Server handler 2 on default port 42831 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T09:36:30,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741888_1071 (size=18097) 2024-11-15T09:36:30,271 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/9fba1e34001c458f91396908fc39056b as hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/9fba1e34001c458f91396908fc39056b 2024-11-15T09:36:30,280 INFO [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8712c3f1e53da8bd32464272fe8554cc/info of 8712c3f1e53da8bd32464272fe8554cc into 9fba1e34001c458f91396908fc39056b(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-15T09:36:30,280 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8712c3f1e53da8bd32464272fe8554cc: 2024-11-15T09:36:30,280 INFO [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc., storeName=8712c3f1e53da8bd32464272fe8554cc/info, priority=13, startTime=1731663390204; duration=0sec 2024-11-15T09:36:30,280 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-15T09:36:30,280 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:36:30,280 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/9fba1e34001c458f91396908fc39056b because midkey is the same as first or last row 2024-11-15T09:36:30,280 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-15T09:36:30,280 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:36:30,280 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/9fba1e34001c458f91396908fc39056b because midkey is the same as first or last row 2024-11-15T09:36:30,280 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-15T09:36:30,280 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:36:30,280 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/9fba1e34001c458f91396908fc39056b because midkey is the same as first or last row 2024-11-15T09:36:30,280 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:36:30,281 DEBUG [RS:0;791f12959b23:46093-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8712c3f1e53da8bd32464272fe8554cc:info 2024-11-15T09:36:30,739 WARN [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 
2024-11-15T09:36:30,740 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:30,760 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:30,785 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:36:30,790 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:36:30,792 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:36:30,792 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:36:30,793 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T09:36:30,797 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@182fe9c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:36:30,797 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@366bb257{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:36:30,914 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@434810ac{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/java.io.tmpdir/jetty-localhost-46709-hadoop-hdfs-3_4_1-tests_jar-_-any-17636295533650318636/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:36:30,915 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@381443d3{HTTP/1.1, (http/1.1)}{localhost:46709} 2024-11-15T09:36:30,915 INFO [Time-limited test {}] server.Server(415): Started @137348ms 2024-11-15T09:36:30,916 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:36:31,351 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7d263139[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33147, datanodeUuid=4397d9d1-004a-4c14-84eb-4dc6d5e87712, infoPort=37169, infoSecurePort=0, ipcPort=37265, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998):Failed to transfer BP-1651719749-172.17.0.2-1731663362998:blk_1073741873_1056 to 127.0.0.1:45915 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:31,351 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3b7ed574[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33147, datanodeUuid=4397d9d1-004a-4c14-84eb-4dc6d5e87712, infoPort=37169, infoSecurePort=0, ipcPort=37265, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998):Failed to transfer BP-1651719749-172.17.0.2-1731663362998:blk_1073741868_1051 to 127.0.0.1:40111 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:31,434 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:31,583 WARN [Thread-987 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T09:36:31,594 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5fbf14245b3057cf with lease ID 0xcd48484be83b2097: from storage DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e node DatanodeRegistration(127.0.0.1:45393, datanodeUuid=ddc2173d-f513-4ad4-ac24-8be0bc5778af, infoPort=42301, infoSecurePort=0, ipcPort=36451, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:36:31,594 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5fbf14245b3057cf with lease ID 0xcd48484be83b2097: from storage DS-b185c7f9-f7be-4bd7-ad9a-d8806a9fc06f node DatanodeRegistration(127.0.0.1:45393, datanodeUuid=ddc2173d-f513-4ad4-ac24-8be0bc5778af, infoPort=42301, infoSecurePort=0, ipcPort=36451, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T09:36:32,350 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3b7ed574[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33147, datanodeUuid=4397d9d1-004a-4c14-84eb-4dc6d5e87712, infoPort=37169, infoSecurePort=0, ipcPort=37265, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998):Failed to transfer BP-1651719749-172.17.0.2-1731663362998:blk_1073741858_1041 to 127.0.0.1:44981 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:32,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45393 is added to blk_1073741883_1066 (size=6027) 2024-11-15T09:36:32,740 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:36:32,760 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:33,434 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:34,351 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7d263139[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33147, datanodeUuid=4397d9d1-004a-4c14-84eb-4dc6d5e87712, infoPort=37169, infoSecurePort=0, ipcPort=37265, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998):Failed to transfer BP-1651719749-172.17.0.2-1731663362998:blk_1073741888_1071 to 127.0.0.1:40111 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:34,740 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:34,760 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:35,070 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T09:36:35,434 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:35,682 ERROR [FSHLog-0-hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData-prefix:791f12959b23,39767,1731663365100 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:36:35,682 WARN [FSHLog-0-hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData-prefix:791f12959b23,39767,1731663365100 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:35,682 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 791f12959b23%2C39767%2C1731663365100:(num 1731663365457) roll requested 2024-11-15T09:36:35,682 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C39767%2C1731663365100.1731663395682 2024-11-15T09:36:35,685 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:35,685 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK], DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]) is bad. 2024-11-15T09:36:35,686 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741889_1072 2024-11-15T09:36:35,686 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK] 2024-11-15T09:36:35,688 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:35,688 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741890_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK], DatanodeInfoWithStorage[127.0.0.1:45393,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK]) is bad. 2024-11-15T09:36:35,688 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741890_1073 2024-11-15T09:36:35,689 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40111,DS-5628fe08-03eb-4269-9471-5517e445cd32,DISK] 2024-11-15T09:36:35,692 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1074 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44981 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:35,691 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1183229914_22 at /127.0.0.1:56454 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741891_1074] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data6]'}, localName='127.0.0.1:33147', datanodeUuid='4397d9d1-004a-4c14-84eb-4dc6d5e87712', xmitsInProgress=0}:Exception transferring block BP-1651719749-172.17.0.2-1731663362998:blk_1073741891_1074 to mirror 127.0.0.1:44981 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:35,692 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741891_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK], DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]) is bad. 2024-11-15T09:36:35,692 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1183229914_22 at /127.0.0.1:56454 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741891_1074] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-15T09:36:35,692 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741891_1074 2024-11-15T09:36:35,692 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1183229914_22 at /127.0.0.1:56454 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741891_1074] {}] datanode.DataXceiver(331): 127.0.0.1:33147:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56454 dst: /127.0.0.1:33147 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T09:36:35,693 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK] 2024-11-15T09:36:35,697 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:35,697 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:35,698 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:35,698 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:35,698 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:35,698 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/WALs/791f12959b23,39767,1731663365100/791f12959b23%2C39767%2C1731663365100.1731663365457 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/WALs/791f12959b23,39767,1731663365100/791f12959b23%2C39767%2C1731663365100.1731663395682 2024-11-15T09:36:35,698 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:35,699 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:36:35,699 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/WALs/791f12959b23,39767,1731663365100/791f12959b23%2C39767%2C1731663365100.1731663365457 2024-11-15T09:36:35,699 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37169:37169),(127.0.0.1/127.0.0.1:42301:42301)] 2024-11-15T09:36:35,699 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/WALs/791f12959b23,39767,1731663365100/791f12959b23%2C39767%2C1731663365100.1731663365457 is not closed yet, will try archiving it next time 2024-11-15T09:36:35,699 WARN [IPC Server handler 4 on default port 42831 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/WALs/791f12959b23,39767,1731663365100/791f12959b23%2C39767%2C1731663365100.1731663365457 has not been closed. Lease recovery is in progress. RecoveryId = 1076 for block blk_1073741830_1006 2024-11-15T09:36:35,700 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/WALs/791f12959b23,39767,1731663365100/791f12959b23%2C39767%2C1731663365100.1731663365457 after 1ms 2024-11-15T09:36:36,741 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:36,761 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:38,741 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:38,761 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:39,701 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/WALs/791f12959b23,39767,1731663365100/791f12959b23%2C39767%2C1731663365100.1731663365457 after 4002ms 2024-11-15T09:36:40,741 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:40,762 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:36:41,610 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6695c07 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1651719749-172.17.0.2-1731663362998:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:45915,null,null]) java.net.ConnectException: Call From 791f12959b23/172.17.0.2 to localhost:36911 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-15T09:36:41,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45393 is added to blk_1073741833_1020 (size=455) 2024-11-15T09:36:41,688 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663365943 to hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/oldWALs/791f12959b23%2C46093%2C1731663365267.1731663365943 2024-11-15T09:36:41,690 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663386699 to hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/oldWALs/791f12959b23%2C46093%2C1731663365267.1731663386699 2024-11-15T09:36:42,742 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:42,762 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:43,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741833_1020 (size=455) 2024-11-15T09:36:44,574 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C46093%2C1731663365267.1731663404574 2024-11-15T09:36:44,578 WARN [Thread-1020 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:44,578 WARN [Thread-1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:45393,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]) is bad. 2024-11-15T09:36:44,578 WARN [Thread-1020 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741893_1077 2024-11-15T09:36:44,579 WARN [Thread-1020 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK] 2024-11-15T09:36:44,587 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:44,587 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:44,587 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:44,587 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:44,587 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:44,588 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663388719 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663404574 2024-11-15T09:36:44,588 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37169:37169),(127.0.0.1/127.0.0.1:42301:42301)] 2024-11-15T09:36:44,589 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663388719 is not closed yet, will try archiving it next time 2024-11-15T09:36:44,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741878_1061 (size=12911) 2024-11-15T09:36:44,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46093 {}] regionserver.HRegion(8855): Flush requested on 8712c3f1e53da8bd32464272fe8554cc 2024-11-15T09:36:44,595 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8712c3f1e53da8bd32464272fe8554cc 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-15T09:36:44,601 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/8db75346debe44feb37d7d7be0514d5b is 1080, key is row0013/info:/1731663404590/Put/seqid=0 2024-11-15T09:36:44,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45393 is added to blk_1073741895_1079 (size=9267) 2024-11-15T09:36:44,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741895_1079 (size=9267) 2024-11-15T09:36:44,607 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/8db75346debe44feb37d7d7be0514d5b 2024-11-15T09:36:44,626 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/8db75346debe44feb37d7d7be0514d5b as hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/8db75346debe44feb37d7d7be0514d5b 2024-11-15T09:36:44,633 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/8db75346debe44feb37d7d7be0514d5b, entries=4, sequenceid=66, filesize=9.0 K 2024-11-15T09:36:44,634 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7528, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8607 for 8712c3f1e53da8bd32464272fe8554cc in 39ms, sequenceid=66, compaction requested=false 2024-11-15T09:36:44,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8712c3f1e53da8bd32464272fe8554cc: 2024-11-15T09:36:44,634 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=26.7 K, sizeToCheck=16.0 K 2024-11-15T09:36:44,634 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:36:44,634 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/9fba1e34001c458f91396908fc39056b because midkey is the same as first or last row 2024-11-15T09:36:44,742 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-15T09:36:44,742 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:44,762 INFO [regionserver/791f12959b23:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:44,816 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T09:36:44,816 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T09:36:44,817 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:36:44,817 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:36:44,817 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:36:44,817 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T09:36:44,817 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T09:36:44,817 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1609523067, stopped=false 2024-11-15T09:36:44,817 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=791f12959b23,39767,1731663365100 2024-11-15T09:36:44,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41295-0x1013dd9ff7e0002, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T09:36:44,888 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T09:36:44,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T09:36:44,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41295-0x1013dd9ff7e0002, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:44,888 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:44,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:44,888 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T09:36:44,888 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T09:36:44,888 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:36:44,889 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:36:44,889 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '791f12959b23,46093,1731663365267' ***** 2024-11-15T09:36:44,889 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:36:44,889 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T09:36:44,889 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '791f12959b23,41295,1731663366547' ***** 2024-11-15T09:36:44,889 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T09:36:44,889 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:36:44,889 INFO [RS:0;791f12959b23:46093 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T09:36:44,889 INFO [RS:1;791f12959b23:41295 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T09:36:44,889 INFO [RS:1;791f12959b23:41295 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T09:36:44,889 INFO [RS:0;791f12959b23:46093 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T09:36:44,889 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T09:36:44,889 INFO [RS:1;791f12959b23:41295 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T09:36:44,889 INFO [RS:0;791f12959b23:46093 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T09:36:44,889 INFO [RS:1;791f12959b23:41295 {}] regionserver.HRegionServer(959): stopping server 791f12959b23,41295,1731663366547 2024-11-15T09:36:44,889 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T09:36:44,889 INFO [RS:0;791f12959b23:46093 {}] regionserver.HRegionServer(3091): Received CLOSE for 8712c3f1e53da8bd32464272fe8554cc 2024-11-15T09:36:44,889 INFO [RS:1;791f12959b23:41295 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T09:36:44,890 INFO [RS:1;791f12959b23:41295 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;791f12959b23:41295. 
2024-11-15T09:36:44,890 DEBUG [RS:1;791f12959b23:41295 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:36:44,890 DEBUG [RS:1;791f12959b23:41295 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:36:44,890 INFO [RS:1;791f12959b23:41295 {}] regionserver.HRegionServer(976): stopping server 791f12959b23,41295,1731663366547; all regions closed. 2024-11-15T09:36:44,890 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41295-0x1013dd9ff7e0002, quorum=127.0.0.1:58696, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:36:44,890 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:44,890 INFO [RS:0;791f12959b23:46093 {}] regionserver.HRegionServer(959): stopping server 791f12959b23,46093,1731663365267 2024-11-15T09:36:44,890 INFO [RS:0;791f12959b23:46093 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T09:36:44,890 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:44,891 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:44,891 INFO [RS:0;791f12959b23:46093 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;791f12959b23:46093. 
2024-11-15T09:36:44,891 DEBUG [RS:0;791f12959b23:46093 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:36:44,891 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8712c3f1e53da8bd32464272fe8554cc, disabling compactions & flushes 2024-11-15T09:36:44,891 DEBUG [RS:0;791f12959b23:46093 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:36:44,891 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc. 2024-11-15T09:36:44,891 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc. 2024-11-15T09:36:44,891 INFO [RS:0;791f12959b23:46093 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T09:36:44,891 INFO [RS:0;791f12959b23:46093 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T09:36:44,891 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc. after waiting 0 ms 2024-11-15T09:36:44,891 INFO [RS:0;791f12959b23:46093 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T09:36:44,891 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc. 
2024-11-15T09:36:44,891 INFO [RS:0;791f12959b23:46093 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T09:36:44,891 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 8712c3f1e53da8bd32464272fe8554cc 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-15T09:36:44,891 INFO [RS:0;791f12959b23:46093 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-15T09:36:44,891 DEBUG [RS:0;791f12959b23:46093 {}] regionserver.HRegionServer(1325): Online Regions={8712c3f1e53da8bd32464272fe8554cc=TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc., 1588230740=hbase:meta,,1.1588230740} 2024-11-15T09:36:44,891 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T09:36:44,891 DEBUG [RS:0;791f12959b23:46093 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8712c3f1e53da8bd32464272fe8554cc 2024-11-15T09:36:44,891 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T09:36:44,891 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T09:36:44,891 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T09:36:44,891 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T09:36:44,891 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-15T09:36:44,892 ERROR [FSHLog-0-hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54-prefix:791f12959b23,46093,1731663365267.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:44,892 WARN [FSHLog-0-hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54-prefix:791f12959b23,46093,1731663365267.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:44,892 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 791f12959b23%2C46093%2C1731663365267.meta:.meta(num 1731663366363) roll requested 2024-11-15T09:36:44,892 INFO [regionserver/791f12959b23:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C46093%2C1731663365267.meta.1731663404892.meta 2024-11-15T09:36:44,892 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:44,893 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:44,894 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:44,895 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:44,895 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 2024-11-15T09:36:44,895 WARN [IPC Server handler 0 on default port 42831 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 has not been closed. Lease recovery is in progress. 
RecoveryId = 1080 for block blk_1073741837_1013 2024-11-15T09:36:44,895 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 after 0ms 2024-11-15T09:36:44,896 WARN [Thread-1034 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1081 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:44,896 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/c14c6eb34e954f9c8787892d4fb79aa3 is 1080, key is row0016/info:/1731663404597/Put/seqid=0 2024-11-15T09:36:44,896 WARN [Thread-1034 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741896_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]) is bad. 2024-11-15T09:36:44,896 WARN [Thread-1034 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741896_1081 2024-11-15T09:36:44,897 WARN [Thread-1034 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK] 2024-11-15T09:36:44,898 WARN [Thread-1035 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1082 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:44,898 WARN [Thread-1035 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741897_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:45393,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]) is bad. 2024-11-15T09:36:44,898 WARN [Thread-1035 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741897_1082 2024-11-15T09:36:44,899 WARN [Thread-1035 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK] 2024-11-15T09:36:44,901 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:44,901 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:44,901 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:44,901 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:44,901 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:44,901 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663404892.meta 2024-11-15T09:36:44,909 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:44,909 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45915,DS-f98b315c-1dd2-4482-8677-bcb94341761d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:36:44,909 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta 2024-11-15T09:36:44,909 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37169:37169),(127.0.0.1/127.0.0.1:42301:42301)] 2024-11-15T09:36:44,909 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta is not closed yet, will try archiving it next time 2024-11-15T09:36:44,910 WARN [IPC Server handler 3 on default port 42831 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta has not been closed. Lease recovery is in progress. RecoveryId = 1085 for block blk_1073741834_1010 2024-11-15T09:36:44,910 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta after 1ms 2024-11-15T09:36:44,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45393 is added to blk_1073741899_1084 (size=13583) 2024-11-15T09:36:44,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741899_1084 (size=13583) 2024-11-15T09:36:44,913 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/c14c6eb34e954f9c8787892d4fb79aa3 2024-11-15T09:36:44,920 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/.tmp/info/c14c6eb34e954f9c8787892d4fb79aa3 as hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/c14c6eb34e954f9c8787892d4fb79aa3 2024-11-15T09:36:44,927 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/c14c6eb34e954f9c8787892d4fb79aa3, entries=8, sequenceid=77, filesize=13.3 K 2024-11-15T09:36:44,928 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, 
currentSize=0 B/0 for 8712c3f1e53da8bd32464272fe8554cc in 37ms, sequenceid=77, compaction requested=true 2024-11-15T09:36:44,929 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/cc48e858c4ae4a9ca3e22a8ba671c169, hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/315b5cbb927a4c1fb8af34a15cf5cb57, hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/86e7991e867c45078896a2d7df855b73, hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/3ef269cba08048dfbd1f6c2bbe74faf9, hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/81da34641bc445d5b12062c903c0c9b8, hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/a715c70c255147c8802c2a6d80827ec0] to archive 2024-11-15T09:36:44,930 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-15T09:36:44,932 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740/.tmp/info/7d9efc362c0e41168b0ebeba6ed2c2fe is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc./info:regioninfo/1731663367196/Put/seqid=0 2024-11-15T09:36:44,932 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/cc48e858c4ae4a9ca3e22a8ba671c169 to hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/cc48e858c4ae4a9ca3e22a8ba671c169 2024-11-15T09:36:44,934 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/315b5cbb927a4c1fb8af34a15cf5cb57 to hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/315b5cbb927a4c1fb8af34a15cf5cb57 2024-11-15T09:36:44,935 WARN [Thread-1046 {}] hdfs.DataStreamer(1959): Exception 
in createBlockOutputStream blk_1073741900_1086 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44981 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:44,935 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:48218 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741900_1086] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data6]'}, localName='127.0.0.1:33147', datanodeUuid='4397d9d1-004a-4c14-84eb-4dc6d5e87712', xmitsInProgress=0}:Exception transferring block BP-1651719749-172.17.0.2-1731663362998:blk_1073741900_1086 to mirror 127.0.0.1:44981 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:44,935 WARN [Thread-1046 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK], DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]) is bad. 2024-11-15T09:36:44,935 WARN [Thread-1046 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741900_1086 2024-11-15T09:36:44,935 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:48218 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741900_1086] {}] datanode.BlockReceiver(316): Block 1073741900 has not released the reserved bytes. 
Releasing 134217728 bytes as part of close. 2024-11-15T09:36:44,935 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1842751138_22 at /127.0.0.1:48218 [Receiving block BP-1651719749-172.17.0.2-1731663362998:blk_1073741900_1086] {}] datanode.DataXceiver(331): 127.0.0.1:33147:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48218 dst: /127.0.0.1:33147 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:44,936 WARN [Thread-1046 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK] 2024-11-15T09:36:44,937 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/86e7991e867c45078896a2d7df855b73 to hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/86e7991e867c45078896a2d7df855b73 2024-11-15T09:36:44,939 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/3ef269cba08048dfbd1f6c2bbe74faf9 to hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/3ef269cba08048dfbd1f6c2bbe74faf9 2024-11-15T09:36:44,940 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/81da34641bc445d5b12062c903c0c9b8 to hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/81da34641bc445d5b12062c903c0c9b8 2024-11-15T09:36:44,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741901_1087 (size=7089) 2024-11-15T09:36:44,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45393 is added to blk_1073741901_1087 (size=7089) 2024-11-15T09:36:44,942 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740/.tmp/info/7d9efc362c0e41168b0ebeba6ed2c2fe 2024-11-15T09:36:44,942 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/a715c70c255147c8802c2a6d80827ec0 to hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/info/a715c70c255147c8802c2a6d80827ec0 2024-11-15T09:36:44,943 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=791f12959b23:39767 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-15T09:36:44,943 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [cc48e858c4ae4a9ca3e22a8ba671c169=10347, 315b5cbb927a4c1fb8af34a15cf5cb57=12506, 86e7991e867c45078896a2d7df855b73=17994, 3ef269cba08048dfbd1f6c2bbe74faf9=6027, 81da34641bc445d5b12062c903c0c9b8=6027, a715c70c255147c8802c2a6d80827ec0=6027] 2024-11-15T09:36:44,947 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8712c3f1e53da8bd32464272fe8554cc/recovered.edits/80.seqid, newMaxSeqId=80, maxSeqId=1 2024-11-15T09:36:44,948 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc. 2024-11-15T09:36:44,948 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8712c3f1e53da8bd32464272fe8554cc: Waiting for close lock at 1731663404890Running coprocessor pre-close hooks at 1731663404890Disabling compacts and flushes for region at 1731663404890Disabling writes for close at 1731663404891 (+1 ms)Obtaining lock to block concurrent updates at 1731663404891Preparing flush snapshotting stores in 8712c3f1e53da8bd32464272fe8554cc at 1731663404891Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc., syncing WAL and waiting on mvcc, flushsize=dataSize=8607, getHeapSize=9456, getOffHeapSize=0, getCellsCount=8 at 1731663404891Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc. at 1731663404892 (+1 ms)Flushing 8712c3f1e53da8bd32464272fe8554cc/info: creating writer at 1731663404892Flushing 8712c3f1e53da8bd32464272fe8554cc/info: appending metadata at 1731663404896 (+4 ms)Flushing 8712c3f1e53da8bd32464272fe8554cc/info: closing flushed file at 1731663404896Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@484af474: reopening flushed file at 1731663404919 (+23 ms)Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 8712c3f1e53da8bd32464272fe8554cc in 37ms, sequenceid=77, compaction requested=true at 1731663404928 (+9 ms)Writing region close event to WAL at 1731663404943 (+15 ms)Running coprocessor post-close hooks at 1731663404948 (+5 ms)Closed at 1731663404948 2024-11-15T09:36:44,949 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731663366799.8712c3f1e53da8bd32464272fe8554cc. 2024-11-15T09:36:44,963 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740/.tmp/ns/b8cddeb8510c4aaaaa25f7b46b7d448b is 43, key is default/ns:d/1731663366478/Put/seqid=0 2024-11-15T09:36:44,965 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1088 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:44,966 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741902_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:45393,DS-a81e1d03-1c35-4f73-96b9-57c44ac7e48e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]) is bad. 2024-11-15T09:36:44,966 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741902_1088 2024-11-15T09:36:44,966 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK] 2024-11-15T09:36:44,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741903_1089 (size=5153) 2024-11-15T09:36:44,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45393 is added to blk_1073741903_1089 (size=5153) 2024-11-15T09:36:44,971 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740/.tmp/ns/b8cddeb8510c4aaaaa25f7b46b7d448b 2024-11-15T09:36:44,991 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.1731663388719 to hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/oldWALs/791f12959b23%2C46093%2C1731663365267.1731663388719 2024-11-15T09:36:44,997 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740/.tmp/table/c9572fda9e9a44a8906c6daaafcd4da2 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731663367210/Put/seqid=0 2024-11-15T09:36:44,999 WARN [Thread-1060 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741904_1090 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:36:44,999 WARN [Thread-1060 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1651719749-172.17.0.2-1731663362998:blk_1073741904_1090 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK], DatanodeInfoWithStorage[127.0.0.1:33147,DS-18791c09-8d6d-4b04-8efa-e9355a38e1a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK]) is bad. 2024-11-15T09:36:44,999 WARN [Thread-1060 {}] hdfs.DataStreamer(1850): Abandoning BP-1651719749-172.17.0.2-1731663362998:blk_1073741904_1090 2024-11-15T09:36:45,000 WARN [Thread-1060 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44981,DS-41528752-22d9-496f-8014-f74b1cb7c0b3,DISK] 2024-11-15T09:36:45,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741905_1091 (size=5424) 2024-11-15T09:36:45,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45393 is added to blk_1073741905_1091 (size=5424) 2024-11-15T09:36:45,005 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740/.tmp/table/c9572fda9e9a44a8906c6daaafcd4da2 2024-11-15T09:36:45,014 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740/.tmp/info/7d9efc362c0e41168b0ebeba6ed2c2fe as hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740/info/7d9efc362c0e41168b0ebeba6ed2c2fe 2024-11-15T09:36:45,022 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740/info/7d9efc362c0e41168b0ebeba6ed2c2fe, entries=10, sequenceid=11, filesize=6.9 K 2024-11-15T09:36:45,023 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740/.tmp/ns/b8cddeb8510c4aaaaa25f7b46b7d448b as hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740/ns/b8cddeb8510c4aaaaa25f7b46b7d448b 2024-11-15T09:36:45,031 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740/ns/b8cddeb8510c4aaaaa25f7b46b7d448b, entries=2, sequenceid=11, filesize=5.0 K 2024-11-15T09:36:45,032 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740/.tmp/table/c9572fda9e9a44a8906c6daaafcd4da2 as hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740/table/c9572fda9e9a44a8906c6daaafcd4da2 2024-11-15T09:36:45,040 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740/table/c9572fda9e9a44a8906c6daaafcd4da2, entries=2, sequenceid=11, filesize=5.3 K 2024-11-15T09:36:45,041 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 150ms, sequenceid=11, compaction requested=false 2024-11-15T09:36:45,046 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-15T09:36:45,047 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T09:36:45,047 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T09:36:45,047 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731663404891Running coprocessor pre-close hooks at 1731663404891Disabling compacts and flushes for region at 1731663404891Disabling writes for close at 1731663404891Obtaining lock to block concurrent updates at 1731663404891Preparing flush snapshotting stores in 1588230740 at 1731663404891Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731663404892 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731663404910 (+18 ms)Flushing 1588230740/info: creating writer at 1731663404910Flushing 1588230740/info: appending metadata at 1731663404932 (+22 ms)Flushing 1588230740/info: closing flushed file at 1731663404932Flushing 1588230740/ns: creating writer at 1731663404948 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731663404963 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731663404963Flushing 1588230740/table: creating writer at 1731663404979 (+16 ms)Flushing 1588230740/table: appending metadata at 1731663404996 (+17 ms)Flushing 1588230740/table: closing flushed file at 1731663404997 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f47ab1: reopening flushed file at 1731663405013 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6dfbc4cf: reopening flushed file at 1731663405022 (+9 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36f15676: reopening flushed file at 1731663405031 (+9 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 150ms, sequenceid=11, compaction requested=false at 1731663405041 (+10 ms)Writing region close event to WAL at 1731663405043 (+2 ms)Running coprocessor post-close hooks at 1731663405047 (+4 ms)Closed at 1731663405047 2024-11-15T09:36:45,048 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T09:36:45,091 INFO [RS:0;791f12959b23:46093 {}] regionserver.HRegionServer(976): stopping server 791f12959b23,46093,1731663365267; all regions closed. 2024-11-15T09:36:45,092 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:45,092 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:45,092 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:45,092 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:45,092 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:45,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45393 is added to blk_1073741898_1083 (size=825) 2024-11-15T09:36:45,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741898_1083 (size=825) 2024-11-15T09:36:45,788 INFO [regionserver/791f12959b23:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T09:36:45,814 INFO [regionserver/791f12959b23:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-15T09:36:45,814 INFO [regionserver/791f12959b23:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-15T09:36:45,849 INFO [regionserver/791f12959b23:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-15T09:36:45,849 INFO [regionserver/791f12959b23:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-15T09:36:46,510 INFO [master/791f12959b23:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-15T09:36:46,510 INFO [master/791f12959b23:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-15T09:36:46,591 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6646a348[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45393, datanodeUuid=ddc2173d-f513-4ad4-ac24-8be0bc5778af, infoPort=42301, infoSecurePort=0, ipcPort=36451, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998):Failed to transfer BP-1651719749-172.17.0.2-1731663362998:blk_1073741831_1007 to 127.0.0.1:44981 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:46,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741835_1011 (size=393) 2024-11-15T09:36:46,759 INFO [regionserver/791f12959b23:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T09:36:47,589 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6646a348[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45393, datanodeUuid=ddc2173d-f513-4ad4-ac24-8be0bc5778af, infoPort=42301, infoSecurePort=0, ipcPort=36451, storageInfo=lv=-57;cid=testClusterID;nsid=1263110437;c=1731663362998):Failed to transfer BP-1651719749-172.17.0.2-1731663362998:blk_1073741827_1003 to 127.0.0.1:44981 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T09:36:47,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741829_1005 (size=34) 2024-11-15T09:36:48,896 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 after 4001ms 2024-11-15T09:36:48,911 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta after 4002ms 2024-11-15T09:36:49,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45393 is added to blk_1073741878_1061 (size=12911) 2024-11-15T09:36:49,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741832_1008 (size=32) 2024-11-15T09:36:49,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741828_1004 (size=1189) 2024-11-15T09:36:49,895 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-15T09:36:49,897 DEBUG [RS:1;791f12959b23:41295 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/oldWALs 2024-11-15T09:36:49,897 INFO [RS:1;791f12959b23:41295 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 791f12959b23%2C41295%2C1731663366547:(num 1731663366900) 2024-11-15T09:36:49,897 DEBUG [RS:1;791f12959b23:41295 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:36:49,897 INFO [RS:1;791f12959b23:41295 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T09:36:49,897 INFO [RS:1;791f12959b23:41295 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T09:36:49,898 INFO [RS:1;791f12959b23:41295 {}] hbase.ChoreService(370): Chore service for: regionserver/791f12959b23:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T09:36:49,898 INFO [RS:1;791f12959b23:41295 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T09:36:49,898 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T09:36:49,898 INFO [RS:1;791f12959b23:41295 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T09:36:49,898 INFO [RS:1;791f12959b23:41295 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-15T09:36:49,898 INFO [RS:1;791f12959b23:41295 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T09:36:49,898 INFO [RS:1;791f12959b23:41295 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41295 2024-11-15T09:36:49,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:36:49,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:49,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T09:36:49,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41295-0x1013dd9ff7e0002, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/791f12959b23,41295,1731663366547 2024-11-15T09:36:49,961 INFO [RS:1;791f12959b23:41295 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T09:36:49,972 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [791f12959b23,41295,1731663366547] 2024-11-15T09:36:49,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:49,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:49,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:49,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:49,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:49,982 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/791f12959b23,41295,1731663366547 already deleted, retry=false 2024-11-15T09:36:49,982 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 791f12959b23,41295,1731663366547 expired; onlineServers=1 2024-11-15T09:36:49,997 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:49,998 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:50,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41295-0x1013dd9ff7e0002, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:36:50,072 INFO [RS:1;791f12959b23:41295 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T09:36:50,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41295-0x1013dd9ff7e0002, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:36:50,072 INFO [RS:1;791f12959b23:41295 {}] regionserver.HRegionServer(1031): Exiting; stopping=791f12959b23,41295,1731663366547; zookeeper connection closed. 
2024-11-15T09:36:50,074 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@76ceb2ee {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@76ceb2ee 2024-11-15T09:36:50,093 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-15T09:36:50,098 DEBUG [RS:0;791f12959b23:46093 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/oldWALs 2024-11-15T09:36:50,098 INFO [RS:0;791f12959b23:46093 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 791f12959b23%2C46093%2C1731663365267.meta:.meta(num 1731663404892) 2024-11-15T09:36:50,098 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:50,099 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:50,099 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:50,099 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:50,099 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:50,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45393 is added to blk_1073741894_1078 (size=14682) 2024-11-15T09:36:50,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741894_1078 (size=14682) 2024-11-15T09:36:50,104 DEBUG [RS:0;791f12959b23:46093 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/oldWALs 2024-11-15T09:36:50,104 INFO [RS:0;791f12959b23:46093 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 791f12959b23%2C46093%2C1731663365267:(num 1731663404574) 2024-11-15T09:36:50,104 DEBUG [RS:0;791f12959b23:46093 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:36:50,104 INFO [RS:0;791f12959b23:46093 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T09:36:50,105 INFO [RS:0;791f12959b23:46093 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T09:36:50,105 INFO [RS:0;791f12959b23:46093 {}] hbase.ChoreService(370): Chore service for: regionserver/791f12959b23:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-15T09:36:50,105 INFO [RS:0;791f12959b23:46093 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T09:36:50,105 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T09:36:50,105 INFO [RS:0;791f12959b23:46093 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46093 2024-11-15T09:36:50,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T09:36:50,117 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/791f12959b23,46093,1731663365267 2024-11-15T09:36:50,117 INFO [RS:0;791f12959b23:46093 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T09:36:50,117 ERROR [pool-302-thread-1-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$363/0x00007fc3f4903f28@37ee64c3 rejected from java.util.concurrent.ThreadPoolExecutor@33d1f6e6[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-15T09:36:50,127 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [791f12959b23,46093,1731663365267] 2024-11-15T09:36:50,138 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/791f12959b23,46093,1731663365267 already deleted, retry=false 2024-11-15T09:36:50,138 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 791f12959b23,46093,1731663365267 expired; onlineServers=0 2024-11-15T09:36:50,138 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '791f12959b23,39767,1731663365100' ***** 2024-11-15T09:36:50,138 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T09:36:50,138 INFO [M:0;791f12959b23:39767 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T09:36:50,138 INFO [M:0;791f12959b23:39767 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T09:36:50,138 DEBUG [M:0;791f12959b23:39767 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T09:36:50,138 DEBUG [M:0;791f12959b23:39767 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T09:36:50,138 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T09:36:50,138 DEBUG [master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663365688 {}] cleaner.HFileCleaner(306): Exit Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663365688,5,FailOnTimeoutGroup] 2024-11-15T09:36:50,139 DEBUG [master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663365684 {}] cleaner.HFileCleaner(306): Exit Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663365684,5,FailOnTimeoutGroup] 2024-11-15T09:36:50,139 INFO [M:0;791f12959b23:39767 {}] hbase.ChoreService(370): Chore service for: master/791f12959b23:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T09:36:50,139 INFO [M:0;791f12959b23:39767 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T09:36:50,139 DEBUG [M:0;791f12959b23:39767 {}] master.HMaster(1795): Stopping service threads 2024-11-15T09:36:50,139 INFO [M:0;791f12959b23:39767 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T09:36:50,139 INFO [M:0;791f12959b23:39767 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T09:36:50,139 INFO [M:0;791f12959b23:39767 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T09:36:50,139 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T09:36:50,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T09:36:50,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:50,151 DEBUG [M:0;791f12959b23:39767 {}] zookeeper.ZKUtil(347): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T09:36:50,151 WARN [M:0;791f12959b23:39767 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T09:36:50,152 INFO [M:0;791f12959b23:39767 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/.lastflushedseqids 2024-11-15T09:36:50,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45393 is added to blk_1073741906_1092 (size=130) 2024-11-15T09:36:50,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741906_1092 (size=130) 2024-11-15T09:36:50,160 INFO [M:0;791f12959b23:39767 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T09:36:50,160 INFO [M:0;791f12959b23:39767 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T09:36:50,160 DEBUG [M:0;791f12959b23:39767 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T09:36:50,161 INFO [M:0;791f12959b23:39767 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:36:50,161 DEBUG [M:0;791f12959b23:39767 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:36:50,161 DEBUG [M:0;791f12959b23:39767 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T09:36:50,161 DEBUG [M:0;791f12959b23:39767 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:36:50,161 INFO [M:0;791f12959b23:39767 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-15T09:36:50,181 DEBUG [M:0;791f12959b23:39767 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5315e1e1b7964ce181a21c594f73f58c is 82, key is hbase:meta,,1/info:regioninfo/1731663366422/Put/seqid=0 2024-11-15T09:36:50,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45393 is added to blk_1073741907_1093 (size=5672) 2024-11-15T09:36:50,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741907_1093 (size=5672) 2024-11-15T09:36:50,187 INFO [M:0;791f12959b23:39767 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5315e1e1b7964ce181a21c594f73f58c 2024-11-15T09:36:50,209 DEBUG [M:0;791f12959b23:39767 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/93247be9e65f4848bb5c9644d9ffd81e is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731663367217/Put/seqid=0 2024-11-15T09:36:50,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45393 is added to blk_1073741908_1094 (size=6255) 2024-11-15T09:36:50,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741908_1094 (size=6255) 2024-11-15T09:36:50,214 INFO [M:0;791f12959b23:39767 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/93247be9e65f4848bb5c9644d9ffd81e 2024-11-15T09:36:50,220 INFO [M:0;791f12959b23:39767 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 93247be9e65f4848bb5c9644d9ffd81e 2024-11-15T09:36:50,227 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:36:50,227 INFO [RS:0;791f12959b23:46093 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T09:36:50,227 DEBUG [pool-302-thread-1-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:46093-0x1013dd9ff7e0001, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:36:50,227 INFO [RS:0;791f12959b23:46093 {}] regionserver.HRegionServer(1031): Exiting; stopping=791f12959b23,46093,1731663365267; zookeeper connection closed. 2024-11-15T09:36:50,228 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@647d68c9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@647d68c9 2024-11-15T09:36:50,228 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-15T09:36:50,236 DEBUG [M:0;791f12959b23:39767 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/02c6afb031504eca95b3e6d3d7419f10 is 69, key is 791f12959b23,41295,1731663366547/rs:state/1731663366626/Put/seqid=0 2024-11-15T09:36:50,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45393 is added to blk_1073741909_1095 (size=5224) 2024-11-15T09:36:50,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741909_1095 (size=5224) 2024-11-15T09:36:50,242 INFO [M:0;791f12959b23:39767 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/02c6afb031504eca95b3e6d3d7419f10 2024-11-15T09:36:50,266 DEBUG [M:0;791f12959b23:39767 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c7fbd5c9ce8c40f8936db74d39907bd5 is 52, key is load_balancer_on/state:d/1731663366527/Put/seqid=0 2024-11-15T09:36:50,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45393 is added to blk_1073741910_1096 (size=5056) 2024-11-15T09:36:50,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741910_1096 (size=5056) 2024-11-15T09:36:50,271 INFO [M:0;791f12959b23:39767 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c7fbd5c9ce8c40f8936db74d39907bd5 2024-11-15T09:36:50,277 DEBUG [M:0;791f12959b23:39767 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5315e1e1b7964ce181a21c594f73f58c as hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5315e1e1b7964ce181a21c594f73f58c 2024-11-15T09:36:50,283 INFO [M:0;791f12959b23:39767 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5315e1e1b7964ce181a21c594f73f58c, entries=8, sequenceid=60, filesize=5.5 K 2024-11-15T09:36:50,284 DEBUG [M:0;791f12959b23:39767 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/93247be9e65f4848bb5c9644d9ffd81e as hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/93247be9e65f4848bb5c9644d9ffd81e 2024-11-15T09:36:50,289 INFO [M:0;791f12959b23:39767 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 93247be9e65f4848bb5c9644d9ffd81e 2024-11-15T09:36:50,290 INFO [M:0;791f12959b23:39767 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/93247be9e65f4848bb5c9644d9ffd81e, entries=6, sequenceid=60, filesize=6.1 K 2024-11-15T09:36:50,291 DEBUG [M:0;791f12959b23:39767 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/02c6afb031504eca95b3e6d3d7419f10 as hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/02c6afb031504eca95b3e6d3d7419f10 2024-11-15T09:36:50,297 INFO [M:0;791f12959b23:39767 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/02c6afb031504eca95b3e6d3d7419f10, entries=2, sequenceid=60, filesize=5.1 K 2024-11-15T09:36:50,298 DEBUG [M:0;791f12959b23:39767 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c7fbd5c9ce8c40f8936db74d39907bd5 as hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c7fbd5c9ce8c40f8936db74d39907bd5 2024-11-15T09:36:50,303 INFO [M:0;791f12959b23:39767 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c7fbd5c9ce8c40f8936db74d39907bd5, entries=1, sequenceid=60, filesize=4.9 K 2024-11-15T09:36:50,305 INFO [M:0;791f12959b23:39767 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=60, compaction requested=false 2024-11-15T09:36:50,306 INFO [M:0;791f12959b23:39767 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
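The "Committing .../.tmp/... as .../<family>/..." DEBUG lines above show the flush completing by moving each temporary store file out of the region's .tmp directory into its column-family directory. As a rough illustration of that write-to-tmp-then-rename pattern only (a hypothetical sketch against the plain Hadoop FileSystem API, not HBase's actual HRegionFileSystem code; the paths are made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpCommitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical paths; the real ones are the .tmp and family directories seen in the log.
        Path tmpFile = new Path("hdfs://localhost:42831/example/store/.tmp/info/flushed-file");
        Path committed = new Path("hdfs://localhost:42831/example/store/info/flushed-file");
        FileSystem fs = tmpFile.getFileSystem(conf);
        // Committing the flushed file amounts to a rename out of the .tmp directory.
        if (!fs.rename(tmpFile, committed)) {
          throw new java.io.IOException("rename failed: " + tmpFile + " -> " + committed);
        }
      }
    }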
2024-11-15T09:36:50,306 DEBUG [M:0;791f12959b23:39767 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731663410160Disabling compacts and flushes for region at 1731663410160Disabling writes for close at 1731663410161 (+1 ms)Obtaining lock to block concurrent updates at 1731663410161Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731663410161Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731663410161Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731663410162 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731663410162Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731663410181 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731663410181Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731663410193 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731663410208 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731663410208Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731663410220 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731663410236 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731663410236Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731663410248 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731663410265 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731663410265Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@207e7396: reopening flushed file at 1731663410276 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16d8fddf: reopening flushed file at 1731663410283 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56a61fc1: reopening flushed file at 1731663410290 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7fb78b9b: reopening flushed file at 1731663410297 (+7 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 143ms, sequenceid=60, compaction requested=false at 1731663410305 (+8 ms)Writing region close event to WAL at 1731663410306 (+1 ms)Closed at 1731663410306 2024-11-15T09:36:50,306 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:50,306 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:50,307 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:50,307 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:50,307 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:36:50,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45393 is added to blk_1073741892_1075 (size=1045) 2024-11-15T09:36:50,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741892_1075 (size=1045) 2024-11-15T09:36:50,500 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T09:36:50,521 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:50,522 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:50,523 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:50,523 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:50,523 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:50,528 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:50,529 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:50,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:50,537 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-15T09:36:50,537 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T09:36:50,537 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T09:36:50,537 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T09:36:50,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741826_1002 (size=42) 2024-11-15T09:36:50,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:36:50,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:36:51,614 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@522505f5 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1651719749-172.17.0.2-1731663362998:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:45915,null,null]) java.net.ConnectException: Call From 791f12959b23/172.17.0.2 to localhost:36911 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-15T09:36:51,709 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/WALs/791f12959b23,39767,1731663365100/791f12959b23%2C39767%2C1731663365100.1731663365457 to hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/oldWALs/791f12959b23%2C39767%2C1731663365100.1731663365457 2024-11-15T09:36:51,713 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/MasterData/oldWALs/791f12959b23%2C39767%2C1731663365100.1731663365457 to hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/oldWALs/791f12959b23%2C39767%2C1731663365100.1731663365457$masterlocalwal$ 2024-11-15T09:36:51,713 INFO [M:0;791f12959b23:39767 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-15T09:36:51,713 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
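The earlier "Failed invocation ... isFileClosed" warnings from util.RecoverLeaseFSUtils come from lease recovery probing whether a WAL file is already closed after its DFSClient has been shut down, which is why they bottom out in "java.io.IOException: Filesystem closed". A hedged sketch of the underlying HDFS client calls that code path relies on (DistributedFileSystem.recoverLease and isFileClosed are existing HDFS APIs; the path and sleep interval below are assumptions for illustration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path wal = new Path("hdfs://localhost:42831/example/WALs/some-wal-file");
        FileSystem fs = wal.getFileSystem(conf);
        if (fs instanceof DistributedFileSystem) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // Ask the NameNode to start lease recovery; true means the file is already closed.
          boolean recovered = dfs.recoverLease(wal);
          while (!recovered && !dfs.isFileClosed(wal)) {
            // Poll until the last block is finalized and the file is closed.
            Thread.sleep(1000L);
          }
        }
        // If the DFSClient has already been closed (as in the warnings above), these calls
        // fail in DFSClient.checkOpen with "Filesystem closed" instead of completing.
      }
    }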
2024-11-15T09:36:51,713 INFO [M:0;791f12959b23:39767 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39767 2024-11-15T09:36:51,714 INFO [M:0;791f12959b23:39767 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T09:36:51,830 INFO [M:0;791f12959b23:39767 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T09:36:51,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:36:51,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39767-0x1013dd9ff7e0000, quorum=127.0.0.1:58696, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:36:51,834 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@434810ac{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:36:51,834 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@381443d3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:36:51,834 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:36:51,835 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@366bb257{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:36:51,835 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@182fe9c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/hadoop.log.dir/,STOPPED} 2024-11-15T09:36:51,837 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-15T09:36:51,837 WARN [BP-1651719749-172.17.0.2-1731663362998 heartbeating to localhost/127.0.0.1:42831 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:36:51,837 WARN [BP-1651719749-172.17.0.2-1731663362998 heartbeating to localhost/127.0.0.1:42831 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1651719749-172.17.0.2-1731663362998 (Datanode Uuid ddc2173d-f513-4ad4-ac24-8be0bc5778af) service to localhost/127.0.0.1:42831 2024-11-15T09:36:51,837 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:36:51,836 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2a835b7b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1651719749-172.17.0.2-1731663362998:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:45915,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:36911 , LocalHost:localPort 791f12959b23/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-15T09:36:51,837 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2a835b7b {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1651719749-172.17.0.2-1731663362998:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:45393,null,null], DatanodeInfoWithStorage[127.0.0.1:45915,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-1651719749-172.17.0.2-1731663362998 2024-11-15T09:36:51,837 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2a835b7b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1651719749-172.17.0.2-1731663362998:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:45393,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1651719749-172.17.0.2-1731663362998 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:36:51,837 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2a835b7b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1651719749-172.17.0.2-1731663362998:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:45915,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1651719749-172.17.0.2-1731663362998 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T09:36:51,837 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data3/current/BP-1651719749-172.17.0.2-1731663362998 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:36:51,837 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2a835b7b {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1651719749-172.17.0.2-1731663362998:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:45393,null,null], DatanodeInfoWithStorage[127.0.0.1:45915,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1651719749-172.17.0.2-1731663362998:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:45393,null,null], DatanodeInfoWithStorage[127.0.0.1:45915,null,null]] 2024-11-15T09:36:51,838 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data4/current/BP-1651719749-172.17.0.2-1731663362998 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:36:51,838 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:36:51,840 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@495a6aea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:36:51,841 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5bf227cc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:36:51,841 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:36:51,841 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45d50f98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:36:51,841 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6030d470{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/hadoop.log.dir/,STOPPED} 2024-11-15T09:36:51,843 WARN [BP-1651719749-172.17.0.2-1731663362998 heartbeating to localhost/127.0.0.1:42831 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:36:51,843 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T09:36:51,843 WARN [BP-1651719749-172.17.0.2-1731663362998 heartbeating to localhost/127.0.0.1:42831 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1651719749-172.17.0.2-1731663362998 (Datanode Uuid 4397d9d1-004a-4c14-84eb-4dc6d5e87712) service to localhost/127.0.0.1:42831 2024-11-15T09:36:51,843 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:36:51,843 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data5/current/BP-1651719749-172.17.0.2-1731663362998 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:36:51,844 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/cluster_4df6329d-44ce-ecea-fdc5-cdf725ad823a/data/data6/current/BP-1651719749-172.17.0.2-1731663362998 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:36:51,844 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:36:51,849 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@94a50db{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T09:36:51,850 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38184680{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:36:51,850 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:36:51,850 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1aa07d80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:36:51,850 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@475f8022{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/hadoop.log.dir/,STOPPED} 2024-11-15T09:36:51,857 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T09:36:51,894 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T09:36:51,903 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 79) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:42831 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007fc3f4bf4000.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42831 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42831 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42831 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42831 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42831 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42831 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42831 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:42831 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
LeaseRenewer:jenkins.hfs.1@localhost:33511 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42831 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007fc3f4bf4000.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native 
Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33511 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42831 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=167 (was 150) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3450 (was 4319) 2024-11-15T09:36:51,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:36:51,911 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=167, ProcessCount=11, AvailableMemoryMB=3450 2024-11-15T09:36:51,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T09:36:51,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/hadoop.log.dir so I do NOT create it in target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127 2024-11-15T09:36:51,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f09c8bf2-6198-68d3-9e62-7e365994f60c/hadoop.tmp.dir so I do NOT create it in target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127 2024-11-15T09:36:51,912 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178, deleteOnExit=true 2024-11-15T09:36:51,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T09:36:51,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/test.cache.data in system properties and HBase conf 2024-11-15T09:36:51,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T09:36:51,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/hadoop.log.dir in system properties and HBase conf 2024-11-15T09:36:51,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T09:36:51,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T09:36:51,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T09:36:51,912 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-15T09:36:51,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T09:36:51,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T09:36:51,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T09:36:51,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:36:51,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T09:36:51,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T09:36:51,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T09:36:51,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T09:36:51,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T09:36:51,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T09:36:51,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/nfs.dump.dir in system properties and HBase conf 2024-11-15T09:36:51,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/java.io.tmpdir in system properties and HBase conf 2024-11-15T09:36:51,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T09:36:51,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T09:36:51,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T09:36:51,926 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T09:36:52,303 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:36:52,308 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:36:52,317 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:36:52,317 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:36:52,317 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:36:52,318 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:36:52,318 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d9c9e99{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:36:52,319 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6bfebe40{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:36:52,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1da660ce{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/java.io.tmpdir/jetty-localhost-40275-hadoop-hdfs-3_4_1-tests_jar-_-any-16226244985423308756/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T09:36:52,432 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@25a29a07{HTTP/1.1, (http/1.1)}{localhost:40275} 2024-11-15T09:36:52,432 INFO [Time-limited test {}] server.Server(415): Started @158866ms 2024-11-15T09:36:52,444 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T09:36:52,703 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:36:52,706 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:36:52,707 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:36:52,707 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:36:52,707 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:36:52,707 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c71ff07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:36:52,708 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6fab6db5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:36:52,812 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@31e2f2e1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/java.io.tmpdir/jetty-localhost-37721-hadoop-hdfs-3_4_1-tests_jar-_-any-17512803258094701573/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:36:52,812 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1fc8bed8{HTTP/1.1, (http/1.1)}{localhost:37721} 2024-11-15T09:36:52,813 INFO [Time-limited test {}] server.Server(415): Started @159246ms 2024-11-15T09:36:52,814 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:36:52,843 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:36:52,847 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:36:52,848 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:36:52,848 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:36:52,848 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T09:36:52,849 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23d454c3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:36:52,849 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7bb583ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:36:52,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:36:52,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:36:52,967 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2f88b75b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/java.io.tmpdir/jetty-localhost-39483-hadoop-hdfs-3_4_1-tests_jar-_-any-17415377961683311284/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:36:52,967 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47946b20{HTTP/1.1, (http/1.1)}{localhost:39483} 2024-11-15T09:36:52,967 INFO [Time-limited test {}] server.Server(415): Started @159400ms 2024-11-15T09:36:52,969 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
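Note on the recurring Close-WAL-Writer-0 warnings above: each one bottoms out in java.io.IOException: Filesystem closed, because by the time AbstractFSWAL retries lease recovery the test's DFS client has already been shut down, so the reflective isFileClosed probe in RecoverLeaseFSUtils is rejected by DFSClient.checkOpen. A minimal sketch of that failure mode follows, assuming the hadoop-hdfs test artifact (MiniDFSCluster) is on the classpath; the class name and file path are illustrative, not taken from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class FilesystemClosedRepro {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Stand up a tiny HDFS, as the HBase mini-cluster tests do.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
          DistributedFileSystem fs = cluster.getFileSystem();
          Path wal = new Path("/test/wal.0");   // illustrative path, not from the log
          fs.create(wal).close();               // write and close a small file
          fs.close();                           // shut the client down, as test teardown does
          // Any further call is rejected by DFSClient.checkOpen():
          fs.isFileClosed(wal);                 // throws java.io.IOException: Filesystem closed
        } finally {
          cluster.shutdown();
        }
      }
    }

Run as written, the last call throws the same IOException: Filesystem closed; in the log the equivalent probe is retried against the two WAL files roughly once per second (09:36:51, 09:36:52, 09:36:53).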
2024-11-15T09:36:53,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:36:53,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:36:54,297 WARN [Thread-1198 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178/data/data2/current/BP-1629767906-172.17.0.2-1731663411931/current, will proceed with Du for space computation calculation, 2024-11-15T09:36:54,297 WARN [Thread-1197 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178/data/data1/current/BP-1629767906-172.17.0.2-1731663411931/current, will proceed with Du for space computation calculation, 2024-11-15T09:36:54,314 WARN [Thread-1161 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T09:36:54,317 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x81a6535708820f56 with lease ID 0xba14fb616ee242a0: Processing first storage report for DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31 from datanode DatanodeRegistration(127.0.0.1:35197, datanodeUuid=45e2ab00-e51e-42de-8686-dac1897dbe9a, infoPort=40027, infoSecurePort=0, ipcPort=36903, storageInfo=lv=-57;cid=testClusterID;nsid=1860331765;c=1731663411931) 2024-11-15T09:36:54,317 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x81a6535708820f56 with lease ID 0xba14fb616ee242a0: from storage DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31 node DatanodeRegistration(127.0.0.1:35197, datanodeUuid=45e2ab00-e51e-42de-8686-dac1897dbe9a, infoPort=40027, infoSecurePort=0, ipcPort=36903, storageInfo=lv=-57;cid=testClusterID;nsid=1860331765;c=1731663411931), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T09:36:54,317 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x81a6535708820f56 with lease ID 0xba14fb616ee242a0: Processing first storage report for DS-218f713d-4327-4aae-aa4a-1c084b327104 from datanode DatanodeRegistration(127.0.0.1:35197, datanodeUuid=45e2ab00-e51e-42de-8686-dac1897dbe9a, infoPort=40027, infoSecurePort=0, ipcPort=36903, storageInfo=lv=-57;cid=testClusterID;nsid=1860331765;c=1731663411931) 2024-11-15T09:36:54,317 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x81a6535708820f56 with lease ID 0xba14fb616ee242a0: from storage DS-218f713d-4327-4aae-aa4a-1c084b327104 node DatanodeRegistration(127.0.0.1:35197, datanodeUuid=45e2ab00-e51e-42de-8686-dac1897dbe9a, infoPort=40027, infoSecurePort=0, ipcPort=36903, storageInfo=lv=-57;cid=testClusterID;nsid=1860331765;c=1731663411931), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:36:54,503 WARN [Thread-1208 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178/data/data3/current/BP-1629767906-172.17.0.2-1731663411931/current, will proceed with Du for space computation calculation, 2024-11-15T09:36:54,503 WARN [Thread-1209 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178/data/data4/current/BP-1629767906-172.17.0.2-1731663411931/current, will proceed with Du for space computation calculation, 2024-11-15T09:36:54,519 WARN [Thread-1184 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T09:36:54,521 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x95ca895f24993e6d with lease ID 0xba14fb616ee242a1: Processing first storage report for DS-b8a00768-30f5-4dea-aba1-44225fa7a624 from datanode DatanodeRegistration(127.0.0.1:43695, datanodeUuid=399051cd-ea78-4153-9b2b-29d93c63f800, infoPort=40473, infoSecurePort=0, ipcPort=34605, storageInfo=lv=-57;cid=testClusterID;nsid=1860331765;c=1731663411931) 2024-11-15T09:36:54,521 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x95ca895f24993e6d with lease ID 0xba14fb616ee242a1: from storage DS-b8a00768-30f5-4dea-aba1-44225fa7a624 node DatanodeRegistration(127.0.0.1:43695, datanodeUuid=399051cd-ea78-4153-9b2b-29d93c63f800, infoPort=40473, infoSecurePort=0, ipcPort=34605, storageInfo=lv=-57;cid=testClusterID;nsid=1860331765;c=1731663411931), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:36:54,521 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x95ca895f24993e6d with lease ID 0xba14fb616ee242a1: Processing first storage report for DS-b8769575-5086-4b5a-90aa-5801fc41d4e4 from datanode DatanodeRegistration(127.0.0.1:43695, datanodeUuid=399051cd-ea78-4153-9b2b-29d93c63f800, infoPort=40473, infoSecurePort=0, ipcPort=34605, storageInfo=lv=-57;cid=testClusterID;nsid=1860331765;c=1731663411931) 2024-11-15T09:36:54,521 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x95ca895f24993e6d with lease ID 0xba14fb616ee242a1: from storage DS-b8769575-5086-4b5a-90aa-5801fc41d4e4 node DatanodeRegistration(127.0.0.1:43695, datanodeUuid=399051cd-ea78-4153-9b2b-29d93c63f800, infoPort=40473, infoSecurePort=0, ipcPort=34605, storageInfo=lv=-57;cid=testClusterID;nsid=1860331765;c=1731663411931), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:36:54,607 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127 2024-11-15T09:36:54,610 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178/zookeeper_0, clientPort=62135, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T09:36:54,611 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62135 2024-11-15T09:36:54,611 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:54,613 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:54,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43695 is added to blk_1073741825_1001 (size=7) 2024-11-15T09:36:54,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741825_1001 (size=7) 2024-11-15T09:36:54,623 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce with version=8 2024-11-15T09:36:54,623 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/hbase-staging 2024-11-15T09:36:54,626 INFO [Time-limited test {}] client.ConnectionUtils(128): master/791f12959b23:0 server-side Connection retries=45 2024-11-15T09:36:54,626 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:54,626 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:54,626 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T09:36:54,626 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:54,626 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T09:36:54,626 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T09:36:54,626 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T09:36:54,627 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35237 2024-11-15T09:36:54,629 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35237 connecting to ZooKeeper ensemble=127.0.0.1:62135 2024-11-15T09:36:54,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:352370x0, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T09:36:54,685 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35237-0x1013ddac0fc0000 connected 2024-11-15T09:36:54,765 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:54,766 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:54,768 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:36:54,768 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce, hbase.cluster.distributed=false 2024-11-15T09:36:54,770 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T09:36:54,770 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35237 2024-11-15T09:36:54,771 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35237 2024-11-15T09:36:54,771 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35237 2024-11-15T09:36:54,771 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35237 2024-11-15T09:36:54,771 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35237 2024-11-15T09:36:54,789 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/791f12959b23:0 server-side Connection retries=45 2024-11-15T09:36:54,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:54,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:54,789 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T09:36:54,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:36:54,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T09:36:54,789 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T09:36:54,790 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T09:36:54,790 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32911 2024-11-15T09:36:54,792 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32911 connecting to ZooKeeper ensemble=127.0.0.1:62135 2024-11-15T09:36:54,793 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:54,795 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:54,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:329110x0, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T09:36:54,807 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:329110x0, quorum=127.0.0.1:62135, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:36:54,807 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32911-0x1013ddac0fc0001 connected 2024-11-15T09:36:54,807 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T09:36:54,808 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T09:36:54,808 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T09:36:54,809 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T09:36:54,809 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32911 2024-11-15T09:36:54,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32911 2024-11-15T09:36:54,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32911 2024-11-15T09:36:54,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32911 2024-11-15T09:36:54,811 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32911 2024-11-15T09:36:54,826 DEBUG [M:0;791f12959b23:35237 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;791f12959b23:35237 2024-11-15T09:36:54,826 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/791f12959b23,35237,1731663414625 2024-11-15T09:36:54,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:36:54,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:36:54,839 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/791f12959b23,35237,1731663414625 2024-11-15T09:36:54,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T09:36:54,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:54,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:54,849 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T09:36:54,850 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/791f12959b23,35237,1731663414625 from backup master directory 2024-11-15T09:36:54,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/791f12959b23,35237,1731663414625 2024-11-15T09:36:54,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:36:54,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:36:54,859 WARN [master/791f12959b23:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-15T09:36:54,859 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=791f12959b23,35237,1731663414625 2024-11-15T09:36:54,863 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/hbase.id] with ID: b0f0b304-ceaa-46fd-bc97-9f843c750366 2024-11-15T09:36:54,864 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/.tmp/hbase.id 2024-11-15T09:36:54,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741826_1002 (size=42) 2024-11-15T09:36:54,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43695 is added to blk_1073741826_1002 (size=42) 2024-11-15T09:36:54,870 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/.tmp/hbase.id]:[hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/hbase.id] 2024-11-15T09:36:54,882 INFO [master/791f12959b23:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:54,882 INFO [master/791f12959b23:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T09:36:54,884 INFO [master/791f12959b23:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-15T09:36:54,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:54,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:54,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741827_1003 (size=196) 2024-11-15T09:36:54,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43695 is added to blk_1073741827_1003 (size=196) 2024-11-15T09:36:54,900 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T09:36:54,901 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T09:36:54,901 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:36:54,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:36:54,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43695 is added to blk_1073741828_1004 (size=1189) 2024-11-15T09:36:54,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741828_1004 (size=1189) 2024-11-15T09:36:54,911 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store 2024-11-15T09:36:54,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:36:54,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43695 is added to blk_1073741829_1005 (size=34) 2024-11-15T09:36:54,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741829_1005 (size=34) 2024-11-15T09:36:54,918 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:36:54,918 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T09:36:54,918 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T09:36:54,918 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:36:54,918 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T09:36:54,918 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:36:54,918 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:36:54,918 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731663414918Disabling compacts and flushes for region at 1731663414918Disabling writes for close at 1731663414918Writing region close event to WAL at 1731663414918Closed at 1731663414918 2024-11-15T09:36:54,919 WARN [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/.initializing 2024-11-15T09:36:54,919 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/WALs/791f12959b23,35237,1731663414625 2024-11-15T09:36:54,922 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C35237%2C1731663414625, suffix=, logDir=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/WALs/791f12959b23,35237,1731663414625, archiveDir=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/oldWALs, maxLogs=10 2024-11-15T09:36:54,922 INFO [master/791f12959b23:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C35237%2C1731663414625.1731663414922 2024-11-15T09:36:54,927 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/WALs/791f12959b23,35237,1731663414625/791f12959b23%2C35237%2C1731663414625.1731663414922 2024-11-15T09:36:54,928 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40027:40027),(127.0.0.1/127.0.0.1:40473:40473)] 2024-11-15T09:36:54,932 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T09:36:54,932 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:36:54,933 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:54,933 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] 
regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:54,934 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:54,935 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T09:36:54,936 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:54,936 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:54,936 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:54,937 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T09:36:54,937 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:54,938 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:36:54,938 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:54,939 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T09:36:54,939 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:54,940 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:36:54,940 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:54,942 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T09:36:54,942 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:54,942 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:36:54,943 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:54,943 DEBUG 
[master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:54,944 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:54,945 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:54,945 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:54,946 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T09:36:54,947 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:36:54,949 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T09:36:54,949 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=717189, jitterRate=-0.08804802596569061}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T09:36:54,950 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731663414933Initializing all the Stores at 1731663414934 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663414934Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663414934Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663414934Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL 
=> 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663414934Cleaning up temporary data from old regions at 1731663414945 (+11 ms)Region opened successfully at 1731663414950 (+5 ms) 2024-11-15T09:36:54,952 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T09:36:54,956 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f1ff0b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=791f12959b23/172.17.0.2:0 2024-11-15T09:36:54,957 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T09:36:54,957 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T09:36:54,957 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T09:36:54,958 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T09:36:54,958 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T09:36:54,959 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T09:36:54,959 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T09:36:54,961 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-15T09:36:54,962 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T09:36:54,975 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T09:36:54,975 INFO [master/791f12959b23:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T09:36:54,976 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T09:36:54,985 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T09:36:54,986 INFO [master/791f12959b23:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T09:36:54,987 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T09:36:54,996 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T09:36:54,998 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T09:36:55,006 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T09:36:55,009 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T09:36:55,017 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T09:36:55,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T09:36:55,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:55,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T09:36:55,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-15T09:36:55,028 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=791f12959b23,35237,1731663414625, sessionid=0x1013ddac0fc0000, setting cluster-up flag (Was=false) 2024-11-15T09:36:55,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:55,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:55,080 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T09:36:55,081 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=791f12959b23,35237,1731663414625 2024-11-15T09:36:55,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:55,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:55,133 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T09:36:55,134 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=791f12959b23,35237,1731663414625 2024-11-15T09:36:55,135 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T09:36:55,137 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T09:36:55,137 INFO [master/791f12959b23:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T09:36:55,138 INFO [master/791f12959b23:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-15T09:36:55,138 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 791f12959b23,35237,1731663414625 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T09:36:55,139 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:36:55,140 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:36:55,140 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:36:55,140 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:36:55,140 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/791f12959b23:0, corePoolSize=10, maxPoolSize=10 2024-11-15T09:36:55,140 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:55,140 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/791f12959b23:0, corePoolSize=2, maxPoolSize=2 2024-11-15T09:36:55,140 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:55,141 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731663445141 2024-11-15T09:36:55,141 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T09:36:55,141 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T09:36:55,142 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T09:36:55,142 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T09:36:55,142 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T09:36:55,142 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T09:36:55,142 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:55,142 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T09:36:55,142 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:36:55,142 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T09:36:55,142 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T09:36:55,142 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T09:36:55,143 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T09:36:55,143 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T09:36:55,143 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663415143,5,FailOnTimeoutGroup] 2024-11-15T09:36:55,143 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663415143,5,FailOnTimeoutGroup] 2024-11-15T09:36:55,143 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:55,143 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T09:36:55,143 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:55,143 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-15T09:36:55,144 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:55,144 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T09:36:55,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741831_1007 (size=1321) 2024-11-15T09:36:55,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43695 is added to blk_1073741831_1007 (size=1321) 2024-11-15T09:36:55,163 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T09:36:55,164 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce 2024-11-15T09:36:55,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741832_1008 (size=32) 2024-11-15T09:36:55,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43695 is added to blk_1073741832_1008 (size=32) 2024-11-15T09:36:55,173 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:36:55,174 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T09:36:55,176 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T09:36:55,176 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:55,177 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:55,177 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T09:36:55,178 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T09:36:55,178 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:55,179 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:55,179 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T09:36:55,180 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T09:36:55,180 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:55,181 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:55,181 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T09:36:55,182 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T09:36:55,182 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:55,183 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:55,183 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T09:36:55,184 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740 2024-11-15T09:36:55,184 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740 2024-11-15T09:36:55,186 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T09:36:55,186 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T09:36:55,186 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T09:36:55,188 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T09:36:55,190 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T09:36:55,191 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=722463, jitterRate=-0.08134086430072784}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T09:36:55,191 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731663415173Initializing all the Stores at 1731663415174 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663415174Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663415174Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663415174Instantiating store for column family {NAME => 'table', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663415174Cleaning up temporary data from old regions at 1731663415186 (+12 ms)Region opened successfully at 1731663415191 (+5 ms) 2024-11-15T09:36:55,192 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T09:36:55,192 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T09:36:55,192 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T09:36:55,192 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T09:36:55,192 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T09:36:55,192 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T09:36:55,192 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731663415192Disabling compacts and flushes for region at 1731663415192Disabling writes for close at 1731663415192Writing region close event to WAL at 1731663415192Closed at 1731663415192 2024-11-15T09:36:55,194 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:36:55,194 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T09:36:55,194 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T09:36:55,195 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T09:36:55,197 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T09:36:55,213 INFO [RS:0;791f12959b23:32911 {}] regionserver.HRegionServer(746): ClusterId : b0f0b304-ceaa-46fd-bc97-9f843c750366 2024-11-15T09:36:55,213 DEBUG [RS:0;791f12959b23:32911 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T09:36:55,218 DEBUG [RS:0;791f12959b23:32911 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T09:36:55,218 DEBUG [RS:0;791f12959b23:32911 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T09:36:55,228 DEBUG [RS:0;791f12959b23:32911 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T09:36:55,229 DEBUG [RS:0;791f12959b23:32911 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@682cb275, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=791f12959b23/172.17.0.2:0 2024-11-15T09:36:55,245 DEBUG [RS:0;791f12959b23:32911 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;791f12959b23:32911 2024-11-15T09:36:55,246 INFO [RS:0;791f12959b23:32911 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T09:36:55,246 INFO [RS:0;791f12959b23:32911 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T09:36:55,246 DEBUG [RS:0;791f12959b23:32911 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-15T09:36:55,246 INFO [RS:0;791f12959b23:32911 {}] regionserver.HRegionServer(2659): reportForDuty to master=791f12959b23,35237,1731663414625 with port=32911, startcode=1731663414789 2024-11-15T09:36:55,247 DEBUG [RS:0;791f12959b23:32911 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T09:36:55,250 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56749, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T09:36:55,250 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35237 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 791f12959b23,32911,1731663414789 2024-11-15T09:36:55,250 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35237 {}] master.ServerManager(517): Registering regionserver=791f12959b23,32911,1731663414789 2024-11-15T09:36:55,253 DEBUG [RS:0;791f12959b23:32911 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce 2024-11-15T09:36:55,253 DEBUG [RS:0;791f12959b23:32911 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44521 2024-11-15T09:36:55,253 DEBUG [RS:0;791f12959b23:32911 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T09:36:55,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T09:36:55,260 DEBUG [RS:0;791f12959b23:32911 {}] zookeeper.ZKUtil(111): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/791f12959b23,32911,1731663414789 2024-11-15T09:36:55,260 WARN [RS:0;791f12959b23:32911 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-15T09:36:55,260 INFO [RS:0;791f12959b23:32911 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:36:55,260 DEBUG [RS:0;791f12959b23:32911 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789 2024-11-15T09:36:55,260 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [791f12959b23,32911,1731663414789] 2024-11-15T09:36:55,264 INFO [RS:0;791f12959b23:32911 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T09:36:55,266 INFO [RS:0;791f12959b23:32911 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T09:36:55,269 INFO [RS:0;791f12959b23:32911 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T09:36:55,269 INFO [RS:0;791f12959b23:32911 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:55,269 INFO [RS:0;791f12959b23:32911 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T09:36:55,270 INFO [RS:0;791f12959b23:32911 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T09:36:55,270 INFO [RS:0;791f12959b23:32911 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:55,271 DEBUG [RS:0;791f12959b23:32911 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:55,271 DEBUG [RS:0;791f12959b23:32911 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:55,271 DEBUG [RS:0;791f12959b23:32911 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:55,271 DEBUG [RS:0;791f12959b23:32911 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:55,271 DEBUG [RS:0;791f12959b23:32911 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:55,271 DEBUG [RS:0;791f12959b23:32911 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/791f12959b23:0, corePoolSize=2, maxPoolSize=2 2024-11-15T09:36:55,271 DEBUG [RS:0;791f12959b23:32911 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:55,271 DEBUG [RS:0;791f12959b23:32911 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:55,271 DEBUG [RS:0;791f12959b23:32911 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/791f12959b23:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T09:36:55,271 DEBUG [RS:0;791f12959b23:32911 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:55,271 DEBUG [RS:0;791f12959b23:32911 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:55,271 DEBUG [RS:0;791f12959b23:32911 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:36:55,272 DEBUG [RS:0;791f12959b23:32911 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/791f12959b23:0, corePoolSize=3, maxPoolSize=3 2024-11-15T09:36:55,272 DEBUG [RS:0;791f12959b23:32911 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0, corePoolSize=3, maxPoolSize=3 2024-11-15T09:36:55,276 INFO [RS:0;791f12959b23:32911 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:55,276 INFO [RS:0;791f12959b23:32911 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:55,276 INFO [RS:0;791f12959b23:32911 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:55,276 INFO [RS:0;791f12959b23:32911 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:55,276 INFO [RS:0;791f12959b23:32911 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:55,277 INFO [RS:0;791f12959b23:32911 {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,32911,1731663414789-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T09:36:55,301 INFO [RS:0;791f12959b23:32911 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T09:36:55,301 INFO [RS:0;791f12959b23:32911 {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,32911,1731663414789-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:55,301 INFO [RS:0;791f12959b23:32911 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:55,301 INFO [RS:0;791f12959b23:32911 {}] regionserver.Replication(171): 791f12959b23,32911,1731663414789 started 2024-11-15T09:36:55,322 INFO [RS:0;791f12959b23:32911 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
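The MemStoreFlusher line above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M) is derived from the region server heap and two memstore fractions; 836 MB is 0.95 of 880 MB, which matches the assumed default lower limit. A minimal sketch of that arithmetic, assuming the stock keys hbase.regionserver.global.memstore.size and hbase.regionserver.global.memstore.size.lower.limit (neither key is printed in the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class GlobalMemStoreLimits {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the region server heap all memstores may use before writes block (assumed default 0.4).
    float upper = conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Flushing starts once usage exceeds lowerLimit * upper (assumed default 0.95).
    float lowerLimit = conf.getFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    long maxHeap = Runtime.getRuntime().maxMemory();
    System.out.printf("globalMemStoreLimit=%d bytes, lowMark=%d bytes%n",
        (long) (maxHeap * upper), (long) (maxHeap * upper * lowerLimit));
  }
}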
2024-11-15T09:36:55,322 INFO [RS:0;791f12959b23:32911 {}] regionserver.HRegionServer(1482): Serving as 791f12959b23,32911,1731663414789, RpcServer on 791f12959b23/172.17.0.2:32911, sessionid=0x1013ddac0fc0001 2024-11-15T09:36:55,322 DEBUG [RS:0;791f12959b23:32911 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T09:36:55,322 DEBUG [RS:0;791f12959b23:32911 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 791f12959b23,32911,1731663414789 2024-11-15T09:36:55,322 DEBUG [RS:0;791f12959b23:32911 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '791f12959b23,32911,1731663414789' 2024-11-15T09:36:55,323 DEBUG [RS:0;791f12959b23:32911 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T09:36:55,323 DEBUG [RS:0;791f12959b23:32911 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T09:36:55,323 DEBUG [RS:0;791f12959b23:32911 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T09:36:55,323 DEBUG [RS:0;791f12959b23:32911 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T09:36:55,323 DEBUG [RS:0;791f12959b23:32911 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 791f12959b23,32911,1731663414789 2024-11-15T09:36:55,323 DEBUG [RS:0;791f12959b23:32911 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '791f12959b23,32911,1731663414789' 2024-11-15T09:36:55,324 DEBUG [RS:0;791f12959b23:32911 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T09:36:55,324 DEBUG [RS:0;791f12959b23:32911 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T09:36:55,324 DEBUG [RS:0;791f12959b23:32911 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T09:36:55,324 INFO [RS:0;791f12959b23:32911 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T09:36:55,324 INFO [RS:0;791f12959b23:32911 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T09:36:55,348 WARN [791f12959b23:35237 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
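Both quota managers above report "Quota support disabled" because quota support is off unless explicitly enabled. A minimal sketch, assuming the stock key hbase.quota.enabled (the key itself is not printed in the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableQuotas {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // With this set, RegionServerRpcQuotaManager and RegionServerSpaceQuotaManager would start
    // instead of logging "Quota support disabled".
    conf.setBoolean("hbase.quota.enabled", true);
    return conf;
  }
}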
2024-11-15T09:36:55,426 INFO [RS:0;791f12959b23:32911 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C32911%2C1731663414789, suffix=, logDir=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789, archiveDir=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/oldWALs, maxLogs=32 2024-11-15T09:36:55,427 INFO [RS:0;791f12959b23:32911 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C32911%2C1731663414789.1731663415427 2024-11-15T09:36:55,434 INFO [RS:0;791f12959b23:32911 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663415427 2024-11-15T09:36:55,435 DEBUG [RS:0;791f12959b23:32911 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40027:40027),(127.0.0.1/127.0.0.1:40473:40473)] 2024-11-15T09:36:55,598 DEBUG [791f12959b23:35237 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T09:36:55,599 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=791f12959b23,32911,1731663414789 2024-11-15T09:36:55,600 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 791f12959b23,32911,1731663414789, state=OPENING 2024-11-15T09:36:55,606 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T09:36:55,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:55,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:36:55,618 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T09:36:55,618 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:36:55,618 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=791f12959b23,32911,1731663414789}] 2024-11-15T09:36:55,618 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:36:55,772 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T09:36:55,774 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55701, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T09:36:55,777 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T09:36:55,778 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:36:55,780 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C32911%2C1731663414789.meta, suffix=.meta, logDir=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789, archiveDir=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/oldWALs, maxLogs=32 2024-11-15T09:36:55,780 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C32911%2C1731663414789.meta.1731663415780.meta 2024-11-15T09:36:55,785 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.meta.1731663415780.meta 2024-11-15T09:36:55,786 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40027:40027),(127.0.0.1/127.0.0.1:40473:40473)] 2024-11-15T09:36:55,787 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T09:36:55,788 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T09:36:55,788 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T09:36:55,788 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
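Both the region server WAL and the meta WAL above are created through FSHLogProvider with blocksize=256 MB, rollsize=128 MB and maxLogs=32. A minimal sketch of how a test might pin those knobs, assuming the stock keys hbase.wal.provider, hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier and hbase.regionserver.maxlogs (the log prints only the resolved values, not the keys):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalTuning {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // "filesystem" selects the FSHLogProvider named in the log entries above.
    conf.set("hbase.wal.provider", "filesystem");
    // WAL block size; the roll size is blocksize * multiplier (256 MB * 0.5 = 128 MB, as logged).
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Cap on un-archived WAL files per region server (maxLogs=32 in the log).
    conf.setInt("hbase.regionserver.maxlogs", 32);
    return conf;
  }
}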
2024-11-15T09:36:55,788 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T09:36:55,788 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:36:55,788 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T09:36:55,788 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T09:36:55,790 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T09:36:55,791 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T09:36:55,791 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:55,791 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:55,792 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T09:36:55,792 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T09:36:55,792 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:55,793 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:55,793 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T09:36:55,794 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T09:36:55,794 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:55,795 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:36:55,795 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T09:36:55,795 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T09:36:55,796 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:55,796 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
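The CompactionConfiguration line repeated for each column family (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0) maps onto a handful of store-level settings. A minimal sketch, assuming the stock property names below (only the resolved values appear in the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Files at or below this size always qualify for minor compaction (128 MB in the log).
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
    // Lower and upper bounds on how many store files one minor compaction may select.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Selection ratios used by ExploringCompactionPolicy (1.2 normally, 5.0 off-peak).
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    return conf;
  }
}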
2024-11-15T09:36:55,796 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T09:36:55,797 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740 2024-11-15T09:36:55,799 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740 2024-11-15T09:36:55,800 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T09:36:55,800 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T09:36:55,801 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T09:36:55,803 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T09:36:55,804 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=782873, jitterRate=-0.00452597439289093}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T09:36:55,804 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T09:36:55,805 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731663415789Writing region info on filesystem at 1731663415789Initializing all the Stores at 1731663415789Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663415790 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663415790Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663415790Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663415790Cleaning up temporary data from old regions at 1731663415800 (+10 ms)Running coprocessor post-open hooks at 1731663415804 (+4 ms)Region opened successfully at 1731663415805 (+1 ms) 2024-11-15T09:36:55,806 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731663415771 2024-11-15T09:36:55,808 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T09:36:55,808 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T09:36:55,809 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=791f12959b23,32911,1731663414789 2024-11-15T09:36:55,811 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 791f12959b23,32911,1731663414789, state=OPEN 2024-11-15T09:36:55,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T09:36:55,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T09:36:55,877 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=791f12959b23,32911,1731663414789 2024-11-15T09:36:55,878 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:36:55,878 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:36:55,881 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T09:36:55,882 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=791f12959b23,32911,1731663414789 in 260 msec 2024-11-15T09:36:55,885 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T09:36:55,885 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 687 msec 2024-11-15T09:36:55,886 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:36:55,886 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T09:36:55,887 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T09:36:55,888 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=791f12959b23,32911,1731663414789, seqNum=-1] 2024-11-15T09:36:55,888 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T09:36:55,889 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40533, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T09:36:55,895 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 758 msec 2024-11-15T09:36:55,896 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731663415896, completionTime=-1 2024-11-15T09:36:55,896 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T09:36:55,896 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T09:36:55,898 INFO [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T09:36:55,898 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731663475898 2024-11-15T09:36:55,898 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731663535898 2024-11-15T09:36:55,898 INFO [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-15T09:36:55,898 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,35237,1731663414625-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:55,898 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,35237,1731663414625-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:55,898 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,35237,1731663414625-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:55,898 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-791f12959b23:35237, period=300000, unit=MILLISECONDS is enabled. 
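InitMetaProcedure above creates the reserved 'default' and 'hbase' namespaces internally on the master; a client cannot recreate those, but it would use the same descriptor type through Admin for its own namespace. A minimal sketch with a made-up namespace name:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // "example_ns" is hypothetical; 'default' and 'hbase' already exist after master init.
      admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
    }
  }
}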
2024-11-15T09:36:55,898 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:55,899 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:55,900 DEBUG [master/791f12959b23:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T09:36:55,903 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.044sec 2024-11-15T09:36:55,903 INFO [master/791f12959b23:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T09:36:55,903 INFO [master/791f12959b23:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T09:36:55,903 INFO [master/791f12959b23:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T09:36:55,903 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T09:36:55,903 INFO [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T09:36:55,903 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,35237,1731663414625-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T09:36:55,903 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,35237,1731663414625-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T09:36:55,906 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T09:36:55,906 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T09:36:55,906 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,35237,1731663414625-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:36:55,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:36:55,913 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31267d1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:36:55,913 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 791f12959b23,35237,-1 for getting cluster id 2024-11-15T09:36:55,913 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T09:36:55,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:36:55,915 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b0f0b304-ceaa-46fd-bc97-9f843c750366' 2024-11-15T09:36:55,915 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T09:36:55,916 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b0f0b304-ceaa-46fd-bc97-9f843c750366" 2024-11-15T09:36:55,916 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76b0cb2e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:36:55,916 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [791f12959b23,35237,-1] 2024-11-15T09:36:55,916 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T09:36:55,917 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:36:55,918 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35224, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T09:36:55,919 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27265229, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:36:55,920 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T09:36:55,921 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=791f12959b23,32911,1731663414789, seqNum=-1] 2024-11-15T09:36:55,922 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T09:36:55,924 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34340, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T09:36:55,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=791f12959b23,35237,1731663414625 2024-11-15T09:36:55,926 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:36:55,929 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T09:36:55,929 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-15T09:36:55,929 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-15T09:36:55,930 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T09:36:55,931 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 791f12959b23,35237,1731663414625 2024-11-15T09:36:55,931 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4654fc74 2024-11-15T09:36:55,931 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T09:36:55,933 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35238, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T09:36:55,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35237 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T09:36:55,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35237 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-15T09:36:55,934 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35237 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T09:36:55,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35237 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-15T09:36:55,937 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T09:36:55,937 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:55,938 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35237 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-15T09:36:55,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35237 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T09:36:55,939 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T09:36:55,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741835_1011 (size=395) 2024-11-15T09:36:55,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43695 is added to blk_1073741835_1011 (size=395) 2024-11-15T09:36:55,949 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ddc261b21c33dbf278c691aaa8cf3246, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce 2024-11-15T09:36:55,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43695 is added to blk_1073741836_1012 (size=78) 2024-11-15T09:36:55,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35197 is added to blk_1073741836_1012 (size=78) 2024-11-15T09:36:55,962 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:36:55,962 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing ddc261b21c33dbf278c691aaa8cf3246, disabling compactions & flushes 2024-11-15T09:36:55,962 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246. 2024-11-15T09:36:55,962 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246. 2024-11-15T09:36:55,962 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246. after waiting 0 ms 2024-11-15T09:36:55,962 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246. 2024-11-15T09:36:55,962 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246. 2024-11-15T09:36:55,963 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for ddc261b21c33dbf278c691aaa8cf3246: Waiting for close lock at 1731663415962Disabling compacts and flushes for region at 1731663415962Disabling writes for close at 1731663415962Writing region close event to WAL at 1731663415962Closed at 1731663415962 2024-11-15T09:36:55,964 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T09:36:55,964 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731663415964"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731663415964"}]},"ts":"1731663415964"} 2024-11-15T09:36:55,966 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-15T09:36:55,968 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T09:36:55,968 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731663415968"}]},"ts":"1731663415968"} 2024-11-15T09:36:55,970 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-15T09:36:55,971 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=ddc261b21c33dbf278c691aaa8cf3246, ASSIGN}] 2024-11-15T09:36:55,972 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=ddc261b21c33dbf278c691aaa8cf3246, ASSIGN 2024-11-15T09:36:55,973 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=ddc261b21c33dbf278c691aaa8cf3246, ASSIGN; state=OFFLINE, location=791f12959b23,32911,1731663414789; forceNewPlan=false, retain=false 2024-11-15T09:36:56,041 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T09:36:56,072 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:56,073 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:56,073 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:56,073 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:56,074 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:56,074 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:56,081 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:56,081 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:56,081 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:56,085 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:36:56,124 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ddc261b21c33dbf278c691aaa8cf3246, regionState=OPENING, regionLocation=791f12959b23,32911,1731663414789 2024-11-15T09:36:56,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=ddc261b21c33dbf278c691aaa8cf3246, ASSIGN because future has completed 2024-11-15T09:36:56,128 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ddc261b21c33dbf278c691aaa8cf3246, server=791f12959b23,32911,1731663414789}] 2024-11-15T09:36:56,286 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246. 2024-11-15T09:36:56,286 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ddc261b21c33dbf278c691aaa8cf3246, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246.', STARTKEY => '', ENDKEY => ''} 2024-11-15T09:36:56,286 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart ddc261b21c33dbf278c691aaa8cf3246 2024-11-15T09:36:56,286 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:36:56,287 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ddc261b21c33dbf278c691aaa8cf3246 2024-11-15T09:36:56,287 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ddc261b21c33dbf278c691aaa8cf3246 2024-11-15T09:36:56,288 INFO [StoreOpener-ddc261b21c33dbf278c691aaa8cf3246-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ddc261b21c33dbf278c691aaa8cf3246 2024-11-15T09:36:56,291 INFO [StoreOpener-ddc261b21c33dbf278c691aaa8cf3246-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ddc261b21c33dbf278c691aaa8cf3246 columnFamilyName info 2024-11-15T09:36:56,291 DEBUG [StoreOpener-ddc261b21c33dbf278c691aaa8cf3246-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:36:56,292 INFO [StoreOpener-ddc261b21c33dbf278c691aaa8cf3246-1 {}] regionserver.HStore(327): Store=ddc261b21c33dbf278c691aaa8cf3246/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:36:56,292 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ddc261b21c33dbf278c691aaa8cf3246 2024-11-15T09:36:56,293 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/default/TestLogRolling-testLogRollOnPipelineRestart/ddc261b21c33dbf278c691aaa8cf3246 2024-11-15T09:36:56,293 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/default/TestLogRolling-testLogRollOnPipelineRestart/ddc261b21c33dbf278c691aaa8cf3246 2024-11-15T09:36:56,294 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ddc261b21c33dbf278c691aaa8cf3246 2024-11-15T09:36:56,294 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ddc261b21c33dbf278c691aaa8cf3246 2024-11-15T09:36:56,295 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ddc261b21c33dbf278c691aaa8cf3246 2024-11-15T09:36:56,297 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/default/TestLogRolling-testLogRollOnPipelineRestart/ddc261b21c33dbf278c691aaa8cf3246/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T09:36:56,298 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ddc261b21c33dbf278c691aaa8cf3246; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=832555, jitterRate=0.058649078011512756}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T09:36:56,298 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): 
Running coprocessor post-open hooks for ddc261b21c33dbf278c691aaa8cf3246 2024-11-15T09:36:56,299 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ddc261b21c33dbf278c691aaa8cf3246: Running coprocessor pre-open hook at 1731663416287Writing region info on filesystem at 1731663416287Initializing all the Stores at 1731663416288 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663416288Cleaning up temporary data from old regions at 1731663416294 (+6 ms)Running coprocessor post-open hooks at 1731663416298 (+4 ms)Region opened successfully at 1731663416298 2024-11-15T09:36:56,299 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246., pid=6, masterSystemTime=1731663416281 2024-11-15T09:36:56,302 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246. 2024-11-15T09:36:56,302 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246. 
2024-11-15T09:36:56,303 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ddc261b21c33dbf278c691aaa8cf3246, regionState=OPEN, openSeqNum=2, regionLocation=791f12959b23,32911,1731663414789 2024-11-15T09:36:56,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ddc261b21c33dbf278c691aaa8cf3246, server=791f12959b23,32911,1731663414789 because future has completed 2024-11-15T09:36:56,309 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T09:36:56,310 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ddc261b21c33dbf278c691aaa8cf3246, server=791f12959b23,32911,1731663414789 in 179 msec 2024-11-15T09:36:56,312 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T09:36:56,312 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=ddc261b21c33dbf278c691aaa8cf3246, ASSIGN in 338 msec 2024-11-15T09:36:56,313 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T09:36:56,314 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731663416313"}]},"ts":"1731663416313"} 2024-11-15T09:36:56,316 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-15T09:36:56,317 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T09:36:56,320 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 383 msec 2024-11-15T09:36:56,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:36:56,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:36:57,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:36:57,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:36:58,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:36:58,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:36:59,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:36:59,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:00,227 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T09:37:00,227 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-15T09:37:00,228 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-15T09:37:00,228 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-15T09:37:00,229 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T09:37:00,229 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-15T09:37:00,229 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T09:37:00,229 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-15T09:37:00,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:00,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:37:01,290 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T09:37:01,311 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:01,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:01,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:01,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:01,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:01,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:01,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:01,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:01,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:01,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:01,329 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T09:37:01,329 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-15T09:37:01,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:01,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:02,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:02,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:03,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:03,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:04,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:04,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:05,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:05,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:05,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35237 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T09:37:05,967 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-15T09:37:05,967 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-15T09:37:05,970 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-15T09:37:05,970 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246. 2024-11-15T09:37:05,974 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246., hostname=791f12959b23,32911,1731663414789, seqNum=2] 2024-11-15T09:37:06,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:06,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:07,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:07,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:07,977 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663415427 2024-11-15T09:37:07,978 WARN [ResponseProcessor for block BP-1629767906-172.17.0.2-1731663411931:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1629767906-172.17.0.2-1731663411931:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1629767906-172.17.0.2-1731663411931:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:43695,DS-b8a00768-30f5-4dea-aba1-44225fa7a624,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:07,978 WARN [ResponseProcessor for block BP-1629767906-172.17.0.2-1731663411931:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1629767906-172.17.0.2-1731663411931:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1629767906-172.17.0.2-1731663411931:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:43695,DS-b8a00768-30f5-4dea-aba1-44225fa7a624,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:37:07,979 WARN [DataStreamer for file /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.meta.1731663415780.meta block BP-1629767906-172.17.0.2-1731663411931:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1629767906-172.17.0.2-1731663411931:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35197,DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31,DISK], DatanodeInfoWithStorage[127.0.0.1:43695,DS-b8a00768-30f5-4dea-aba1-44225fa7a624,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43695,DS-b8a00768-30f5-4dea-aba1-44225fa7a624,DISK]) is bad. 2024-11-15T09:37:07,979 WARN [PacketResponder: BP-1629767906-172.17.0.2-1731663411931:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43695] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:37:07,979 WARN [PacketResponder: BP-1629767906-172.17.0.2-1731663411931:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43695] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:37:07,979 WARN [DataStreamer for file /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/WALs/791f12959b23,35237,1731663414625/791f12959b23%2C35237%2C1731663414625.1731663414922 block BP-1629767906-172.17.0.2-1731663411931:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1629767906-172.17.0.2-1731663411931:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35197,DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31,DISK], DatanodeInfoWithStorage[127.0.0.1:43695,DS-b8a00768-30f5-4dea-aba1-44225fa7a624,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43695,DS-b8a00768-30f5-4dea-aba1-44225fa7a624,DISK]) is bad. 2024-11-15T09:37:07,979 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1524174815_22 at /127.0.0.1:47834 [Receiving block BP-1629767906-172.17.0.2-1731663411931:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47834 dst: /127.0.0.1:35197 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T09:37:07,979 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-368324727_22 at /127.0.0.1:47892 [Receiving block BP-1629767906-172.17.0.2-1731663411931:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47892 dst: /127.0.0.1:35197 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:37:07,978 WARN [ResponseProcessor for block BP-1629767906-172.17.0.2-1731663411931:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1629767906-172.17.0.2-1731663411931:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1629767906-172.17.0.2-1731663411931:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:43695,DS-b8a00768-30f5-4dea-aba1-44225fa7a624,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:07,980 WARN [DataStreamer for file /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663415427 block BP-1629767906-172.17.0.2-1731663411931:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1629767906-172.17.0.2-1731663411931:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35197,DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31,DISK], DatanodeInfoWithStorage[127.0.0.1:43695,DS-b8a00768-30f5-4dea-aba1-44225fa7a624,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43695,DS-b8a00768-30f5-4dea-aba1-44225fa7a624,DISK]) is bad. 2024-11-15T09:37:07,980 WARN [PacketResponder: BP-1629767906-172.17.0.2-1731663411931:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43695] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] 
at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:37:07,980 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1524174815_22 at /127.0.0.1:43744 [Receiving block BP-1629767906-172.17.0.2-1731663411931:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43695:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43744 dst: /127.0.0.1:43695 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:37:07,980 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-368324727_22 at /127.0.0.1:43782 [Receiving block BP-1629767906-172.17.0.2-1731663411931:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43695:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43782 dst: /127.0.0.1:43695 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:37:07,980 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-368324727_22 at /127.0.0.1:47888 [Receiving block BP-1629767906-172.17.0.2-1731663411931:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47888 dst: /127.0.0.1:35197 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:37:07,981 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-368324727_22 at /127.0.0.1:43766 [Receiving block BP-1629767906-172.17.0.2-1731663411931:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43695:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43766 dst: /127.0.0.1:43695 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T09:37:08,014 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2f88b75b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:37:08,014 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47946b20{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:37:08,014 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:37:08,014 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7bb583ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:37:08,014 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23d454c3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/hadoop.log.dir/,STOPPED} 2024-11-15T09:37:08,015 WARN [BP-1629767906-172.17.0.2-1731663411931 heartbeating to localhost/127.0.0.1:44521 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:37:08,015 WARN [BP-1629767906-172.17.0.2-1731663411931 heartbeating to localhost/127.0.0.1:44521 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1629767906-172.17.0.2-1731663411931 (Datanode Uuid 399051cd-ea78-4153-9b2b-29d93c63f800) service to localhost/127.0.0.1:44521 2024-11-15T09:37:08,015 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-15T09:37:08,016 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:37:08,016 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178/data/data3/current/BP-1629767906-172.17.0.2-1731663411931 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:37:08,016 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178/data/data4/current/BP-1629767906-172.17.0.2-1731663411931 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:37:08,016 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:37:08,024 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:37:08,028 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:37:08,029 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:37:08,029 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:37:08,030 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:37:08,030 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24d66024{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:37:08,030 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e3cd135{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:37:08,139 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@117b424d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/java.io.tmpdir/jetty-localhost-41219-hadoop-hdfs-3_4_1-tests_jar-_-any-11881358514355034433/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:37:08,139 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2460467f{HTTP/1.1, (http/1.1)}{localhost:41219} 2024-11-15T09:37:08,140 INFO [Time-limited test {}] server.Server(415): Started @174573ms 2024-11-15T09:37:08,141 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:37:08,172 WARN [ResponseProcessor for block BP-1629767906-172.17.0.2-1731663411931:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1629767906-172.17.0.2-1731663411931:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:08,172 WARN [ResponseProcessor for block BP-1629767906-172.17.0.2-1731663411931:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1629767906-172.17.0.2-1731663411931:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:08,172 WARN [ResponseProcessor for block BP-1629767906-172.17.0.2-1731663411931:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1629767906-172.17.0.2-1731663411931:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:08,173 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-368324727_22 at /127.0.0.1:39328 [Receiving block BP-1629767906-172.17.0.2-1731663411931:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39328 dst: /127.0.0.1:35197 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T09:37:08,173 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1524174815_22 at /127.0.0.1:39336 [Receiving block BP-1629767906-172.17.0.2-1731663411931:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39336 dst: /127.0.0.1:35197 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
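The ClosedChannelException and "Unexpected EOF" stacks above are the expected fallout of the test bouncing its datanodes while the region server still holds open write pipelines for its WAL blocks; the later "Data Nodes restarted" line at 09:37:09,382 confirms the restart. As a hedged illustration only, not code taken from the test, the sketch below shows how such a restart is typically driven against the MiniDFSCluster that backs an HBase mini-cluster; the helper name and the cluster handle are assumptions.

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    final class DataNodeBouncer {
      // Hypothetical helper: bounce every datanode of a mini cluster while
      // clients still hold open write pipelines. The restart closes the
      // datanodes' sockets, which the HDFS client then reports as the
      // EOFException / ClosedChannelException stacks logged above.
      static void bounceDataNodes(MiniDFSCluster cluster) throws Exception {
        cluster.restartDataNodes(); // stop and start the datanodes in place
        cluster.waitActive();       // wait until the namenode sees them again
      }
    }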
2024-11-15T09:37:08,175 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@31e2f2e1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:37:08,175 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1fc8bed8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:37:08,175 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:37:08,175 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6fab6db5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:37:08,175 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c71ff07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/hadoop.log.dir/,STOPPED} 2024-11-15T09:37:08,177 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-368324727_22 at /127.0.0.1:39322 [Receiving block BP-1629767906-172.17.0.2-1731663411931:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39322 dst: /127.0.0.1:35197 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[closed]. Total timeout mills is 60000, 59807 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:37:08,184 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-15T09:37:08,184 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:37:08,184 WARN [BP-1629767906-172.17.0.2-1731663411931 heartbeating to localhost/127.0.0.1:44521 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:37:08,184 WARN [BP-1629767906-172.17.0.2-1731663411931 heartbeating to localhost/127.0.0.1:44521 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1629767906-172.17.0.2-1731663411931 (Datanode Uuid 45e2ab00-e51e-42de-8686-dac1897dbe9a) service to localhost/127.0.0.1:44521 2024-11-15T09:37:08,185 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178/data/data1/current/BP-1629767906-172.17.0.2-1731663411931 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:37:08,185 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178/data/data2/current/BP-1629767906-172.17.0.2-1731663411931 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:37:08,185 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:37:08,207 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:37:08,215 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:37:08,221 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:37:08,221 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:37:08,221 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:37:08,222 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1223970{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:37:08,222 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e84b526{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:37:08,335 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b559376{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/java.io.tmpdir/jetty-localhost-45709-hadoop-hdfs-3_4_1-tests_jar-_-any-6859380218902395470/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:37:08,335 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75ed142f{HTTP/1.1, (http/1.1)}{localhost:45709} 2024-11-15T09:37:08,336 INFO [Time-limited test {}] server.Server(415): Started @174769ms 2024-11-15T09:37:08,337 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:37:08,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:08,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:08,938 WARN [Thread-1333 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T09:37:08,941 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf9818d384476c48b with lease ID 0xba14fb616ee242a2: from storage DS-b8a00768-30f5-4dea-aba1-44225fa7a624 node DatanodeRegistration(127.0.0.1:41943, datanodeUuid=399051cd-ea78-4153-9b2b-29d93c63f800, infoPort=43737, infoSecurePort=0, ipcPort=36627, storageInfo=lv=-57;cid=testClusterID;nsid=1860331765;c=1731663411931), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:37:08,941 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf9818d384476c48b with lease ID 0xba14fb616ee242a2: from storage DS-b8769575-5086-4b5a-90aa-5801fc41d4e4 node DatanodeRegistration(127.0.0.1:41943, datanodeUuid=399051cd-ea78-4153-9b2b-29d93c63f800, infoPort=43737, infoSecurePort=0, ipcPort=36627, storageInfo=lv=-57;cid=testClusterID;nsid=1860331765;c=1731663411931), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:37:09,192 WARN [Thread-1353 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T09:37:09,194 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe9bb750c8866cac5 with lease ID 0xba14fb616ee242a3: from storage DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31 node DatanodeRegistration(127.0.0.1:36495, datanodeUuid=45e2ab00-e51e-42de-8686-dac1897dbe9a, infoPort=44581, infoSecurePort=0, ipcPort=46249, storageInfo=lv=-57;cid=testClusterID;nsid=1860331765;c=1731663411931), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:37:09,194 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe9bb750c8866cac5 with lease ID 0xba14fb616ee242a3: from storage DS-218f713d-4327-4aae-aa4a-1c084b327104 node DatanodeRegistration(127.0.0.1:36495, datanodeUuid=45e2ab00-e51e-42de-8686-dac1897dbe9a, infoPort=44581, infoSecurePort=0, ipcPort=46249, storageInfo=lv=-57;cid=testClusterID;nsid=1860331765;c=1731663411931), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:37:09,382 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-15T09:37:09,385 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-15T09:37:09,386 ERROR [FSHLog-0-hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce-prefix:791f12959b23,32911,1731663414789 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35197,DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
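The appendAndSync failure above ("All datanodes [...] are bad. Aborting...") is DataStreamer giving up after every node in the write pipeline failed with no replacement available; the pipeline for this WAL block listed a single datanode, 127.0.0.1:35197, which had just been restarted. For reference, and hedged as a generic illustration rather than anything this test is shown configuring, these are the standard HDFS client settings that govern datanode replacement during pipeline recovery:

    import org.apache.hadoop.conf.Configuration;

    final class PipelineRecoverySettings {
      // Illustrative only: the client-side knobs that decide whether a failed
      // datanode in a write pipeline can be swapped out mid-write. With a
      // single replica in the pipeline, as in the WAL block above, there is
      // nothing to fail over to, so the append aborts.
      static Configuration sketch() {
        Configuration conf = new Configuration();
        conf.setInt("dfs.replication", 2);
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }

With the DEFAULT policy, replacement is generally only attempted for pipelines of three or more replicas, so a small mini-cluster can still see aborts like the one above.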
2024-11-15T09:37:09,386 WARN [FSHLog-0-hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce-prefix:791f12959b23,32911,1731663414789 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35197,DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:09,387 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 791f12959b23%2C32911%2C1731663414789:(num 1731663415427) roll requested 2024-11-15T09:37:09,387 INFO [regionserver/791f12959b23:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C32911%2C1731663414789.1731663429387 2024-11-15T09:37:09,393 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663415427 newFile=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663429387 2024-11-15T09:37:09,393 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:09,394 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:09,394 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:09,394 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:09,394 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:09,394 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663415427 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663429387 2024-11-15T09:37:09,394 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35197,DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
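The roll that follows is the region server's normal reaction to the failed append, and the "preLogRoll: oldFile=... newFile=..." debug line comes from a listener the test registers on the WAL (the TestLogRolling$2 frame in the logger name). Below is a hedged sketch of a listener with that shape; it assumes the WAL handle exposes registerWALActionsListener and does not claim to reproduce the test's actual anonymous class.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
    import org.apache.hadoop.hbase.wal.WAL;

    final class RollLogger {
      // Hedged sketch: print the old and new WAL paths around each roll, in
      // the same format as the preLogRoll line in the log above. The roll
      // itself is triggered by the WAL roller, not by this listener.
      static void watchRolls(WAL wal) {
        wal.registerWALActionsListener(new WALActionsListener() {
          @Override
          public void preLogRoll(Path oldPath, Path newPath) {
            System.out.println("preLogRoll: oldFile=" + oldPath + " newFile=" + newPath);
          }
        });
      }
    }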
2024-11-15T09:37:09,395 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35197,DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:09,395 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663415427 2024-11-15T09:37:09,395 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44581:44581),(127.0.0.1/127.0.0.1:43737:43737)] 2024-11-15T09:37:09,395 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663415427 is not closed yet, will try archiving it next time 2024-11-15T09:37:09,395 WARN [IPC Server handler 2 on default port 44521 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663415427 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-15T09:37:09,396 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663415427 after 0ms 2024-11-15T09:37:09,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:09,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:37:10,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:10,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:10,941 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-15T09:37:11,398 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-15T09:37:11,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:11,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:12,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:12,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:13,396 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663415427 after 4001ms 2024-11-15T09:37:13,401 WARN [ResponseProcessor for block BP-1629767906-172.17.0.2-1731663411931:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1629767906-172.17.0.2-1731663411931:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:13,402 WARN [DataStreamer for file /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663429387 block BP-1629767906-172.17.0.2-1731663411931:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1629767906-172.17.0.2-1731663411931:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36495,DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31,DISK], DatanodeInfoWithStorage[127.0.0.1:41943,DS-b8a00768-30f5-4dea-aba1-44225fa7a624,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36495,DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31,DISK]) is bad. 
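Taken together, the lease-recovery lines above tell a complete story for the rolled-away WAL file on port 44521: attempt=0 fails immediately because the namenode reports lease recovery already in progress (RecoveryId = 1017), and attempt=1 succeeds about four seconds later ("Recovered lease ... after 4001ms"). The interleaved "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" stacks refer to a different filesystem (localhost:42831) whose client has already been shut down; RecoverLeaseFSUtils reaches isFileClosed through reflection, so that closed client surfaces as an InvocationTargetException. A hedged sketch of the underlying recover-then-poll pattern, written against the public DistributedFileSystem API rather than HBase's reflective helper:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    final class WalLeaseRecovery {
      // Hedged sketch: ask the namenode to recover the lease on an abandoned
      // WAL file, then poll until the file is reported closed.
      static void recoverWalLease(DistributedFileSystem dfs, Path wal) throws Exception {
        boolean closed = dfs.recoverLease(wal); // true if the file is already closed
        while (!closed) {
          Thread.sleep(1000L);                  // attempt=1 above succeeded after ~4s
          closed = dfs.isFileClosed(wal);
        }
      }
    }

The real helper retries with waits between attempts, as the attempt=0 and attempt=1 lines show; the sketch keeps only the core loop.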
2024-11-15T09:37:13,402 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-368324727_22 at /127.0.0.1:46562 [Receiving block BP-1629767906-172.17.0.2-1731663411931:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:36495:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46562 dst: /127.0.0.1:36495 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:37:13,403 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-368324727_22 at /127.0.0.1:59054 [Receiving block BP-1629767906-172.17.0.2-1731663411931:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41943:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59054 dst: /127.0.0.1:41943 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:37:13,497 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b559376{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:37:13,498 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75ed142f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:37:13,498 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:37:13,498 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e84b526{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:37:13,498 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1223970{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/hadoop.log.dir/,STOPPED} 2024-11-15T09:37:13,500 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T09:37:13,500 WARN [BP-1629767906-172.17.0.2-1731663411931 heartbeating to localhost/127.0.0.1:44521 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:37:13,500 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:37:13,500 WARN [BP-1629767906-172.17.0.2-1731663411931 heartbeating to localhost/127.0.0.1:44521 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1629767906-172.17.0.2-1731663411931 (Datanode Uuid 45e2ab00-e51e-42de-8686-dac1897dbe9a) service to localhost/127.0.0.1:44521 2024-11-15T09:37:13,500 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178/data/data1/current/BP-1629767906-172.17.0.2-1731663411931 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:37:13,501 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178/data/data2/current/BP-1629767906-172.17.0.2-1731663411931 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:37:13,501 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:37:13,508 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:37:13,512 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:37:13,513 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:37:13,513 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:37:13,513 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:37:13,514 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fbd08cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:37:13,514 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e4fdbc6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:37:13,620 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d237fac{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/java.io.tmpdir/jetty-localhost-34111-hadoop-hdfs-3_4_1-tests_jar-_-any-6765802835144366263/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:37:13,620 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@19016e01{HTTP/1.1, (http/1.1)}{localhost:34111} 2024-11-15T09:37:13,620 INFO [Time-limited test {}] server.Server(415): Started @180053ms 2024-11-15T09:37:13,622 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:37:13,657 WARN [ResponseProcessor for block BP-1629767906-172.17.0.2-1731663411931:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1629767906-172.17.0.2-1731663411931:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:13,657 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-368324727_22 at /127.0.0.1:37990 [Receiving block BP-1629767906-172.17.0.2-1731663411931:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41943:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37990 dst: /127.0.0.1:41943 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
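Every datanode HTTP endpoint started in this section logs the same AuthenticationFilter warning: it cannot read /home/jenkins/hadoop-http-auth-signature-secret, so it falls back to random secrets, which is harmless for a throwaway mini-cluster. That path matches the default of the standard hadoop.http.authentication.signature.secret.file property. A hedged sketch of how the warning could be silenced, using a hypothetical path and not something this test does:

    import org.apache.hadoop.conf.Configuration;

    final class HttpAuthSecret {
      // Hedged sketch: point the HTTP authentication filter at a readable
      // secret file so it stops falling back to random per-process secrets.
      static Configuration withSecretFile() {
        Configuration conf = new Configuration();
        conf.set("hadoop.http.authentication.signature.secret.file",
            "/path/to/http-auth-signature-secret"); // hypothetical path
        return conf;
      }
    }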
2024-11-15T09:37:13,665 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@117b424d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:37:13,665 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2460467f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:37:13,665 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:37:13,665 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e3cd135{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:37:13,666 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24d66024{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/hadoop.log.dir/,STOPPED} 2024-11-15T09:37:13,666 WARN [BP-1629767906-172.17.0.2-1731663411931 heartbeating to localhost/127.0.0.1:44521 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:37:13,666 WARN [BP-1629767906-172.17.0.2-1731663411931 heartbeating to localhost/127.0.0.1:44521 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1629767906-172.17.0.2-1731663411931 (Datanode Uuid 399051cd-ea78-4153-9b2b-29d93c63f800) service to localhost/127.0.0.1:44521 2024-11-15T09:37:13,666 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-15T09:37:13,667 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:37:13,667 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178/data/data3/current/BP-1629767906-172.17.0.2-1731663411931 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:37:13,667 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178/data/data4/current/BP-1629767906-172.17.0.2-1731663411931 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:37:13,668 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:37:13,677 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:37:13,683 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:37:13,684 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:37:13,684 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:37:13,684 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T09:37:13,685 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6023e2fd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:37:13,685 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7f78a2f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:37:13,788 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7fc558ce{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/java.io.tmpdir/jetty-localhost-44347-hadoop-hdfs-3_4_1-tests_jar-_-any-16227032734533558320/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:37:13,788 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c39138a{HTTP/1.1, (http/1.1)}{localhost:44347} 2024-11-15T09:37:13,788 INFO [Time-limited test {}] server.Server(415): Started @180221ms 2024-11-15T09:37:13,790 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:37:13,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:13,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:14,274 WARN [Thread-1407 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T09:37:14,276 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcfc9a3e1ce145121 with lease ID 0xba14fb616ee242a4: from storage DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31 node DatanodeRegistration(127.0.0.1:45161, datanodeUuid=45e2ab00-e51e-42de-8686-dac1897dbe9a, infoPort=33899, infoSecurePort=0, ipcPort=42209, storageInfo=lv=-57;cid=testClusterID;nsid=1860331765;c=1731663411931), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:37:14,276 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcfc9a3e1ce145121 with lease ID 0xba14fb616ee242a4: from storage DS-218f713d-4327-4aae-aa4a-1c084b327104 node DatanodeRegistration(127.0.0.1:45161, datanodeUuid=45e2ab00-e51e-42de-8686-dac1897dbe9a, infoPort=33899, infoSecurePort=0, ipcPort=42209, storageInfo=lv=-57;cid=testClusterID;nsid=1860331765;c=1731663411931), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:37:14,429 WARN [Thread-1427 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T09:37:14,432 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7828dc4a9efd11f7 with lease ID 0xba14fb616ee242a5: from storage DS-b8a00768-30f5-4dea-aba1-44225fa7a624 node DatanodeRegistration(127.0.0.1:44421, datanodeUuid=399051cd-ea78-4153-9b2b-29d93c63f800, infoPort=36531, infoSecurePort=0, ipcPort=37569, storageInfo=lv=-57;cid=testClusterID;nsid=1860331765;c=1731663411931), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:37:14,432 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7828dc4a9efd11f7 with lease ID 0xba14fb616ee242a5: from storage DS-b8769575-5086-4b5a-90aa-5801fc41d4e4 node DatanodeRegistration(127.0.0.1:44421, datanodeUuid=399051cd-ea78-4153-9b2b-29d93c63f800, infoPort=36531, infoSecurePort=0, ipcPort=37569, storageInfo=lv=-57;cid=testClusterID;nsid=1860331765;c=1731663411931), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:37:14,810 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-15T09:37:14,813 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-15T09:37:14,816 ERROR [FSHLog-0-hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce-prefix:791f12959b23,32911,1731663414789 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-b8a00768-30f5-4dea-aba1-44225fa7a624,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:37:14,816 WARN [FSHLog-0-hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce-prefix:791f12959b23,32911,1731663414789 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-b8a00768-30f5-4dea-aba1-44225fa7a624,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:14,816 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 791f12959b23%2C32911%2C1731663414789:(num 1731663429387) roll requested 2024-11-15T09:37:14,817 INFO [regionserver/791f12959b23:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C32911%2C1731663414789.1731663434816 2024-11-15T09:37:14,828 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663429387 newFile=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663434816 2024-11-15T09:37:14,829 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:14,829 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:14,829 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:14,829 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:14,829 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:14,829 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663429387 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663434816 2024-11-15T09:37:14,829 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-b8a00768-30f5-4dea-aba1-44225fa7a624,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:37:14,829 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-b8a00768-30f5-4dea-aba1-44225fa7a624,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:14,829 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663429387 2024-11-15T09:37:14,830 WARN [IPC Server handler 4 on default port 44521 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663429387 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-15T09:37:14,830 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663429387 after 1ms 2024-11-15T09:37:14,840 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36531:36531),(127.0.0.1/127.0.0.1:33899:33899)] 2024-11-15T09:37:14,840 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663429387 is not closed yet, will try archiving it next time 2024-11-15T09:37:14,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:14,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:37:15,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:15,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:16,842 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C32911%2C1731663414789.1731663436841 2024-11-15T09:37:16,849 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663434816 newFile=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663436841 2024-11-15T09:37:16,849 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:16,849 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:16,849 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:16,849 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:16,849 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:16,850 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663434816 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663436841 2024-11-15T09:37:16,851 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33899:33899),(127.0.0.1/127.0.0.1:36531:36531)] 2024-11-15T09:37:16,851 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663429387 is not closed yet, will try archiving it next time 2024-11-15T09:37:16,851 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663434816 is not closed yet, will try archiving it next time 2024-11-15T09:37:16,851 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for 
hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663415427 2024-11-15T09:37:16,851 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663415427 2024-11-15T09:37:16,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741838_1019 (size=1264) 2024-11-15T09:37:16,852 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663415427 after 1ms 2024-11-15T09:37:16,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44421 is added to blk_1073741838_1019 (size=1264) 2024-11-15T09:37:16,852 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663415427 2024-11-15T09:37:16,853 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663429387 is not closed yet, will try archiving it next time 2024-11-15T09:37:16,864 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731663416299/Put/vlen=218/seqid=0] 2024-11-15T09:37:16,864 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731663425976/Put/vlen=1045/seqid=0] 2024-11-15T09:37:16,864 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663415427 2024-11-15T09:37:16,864 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663429387 2024-11-15T09:37:16,864 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663429387 2024-11-15T09:37:16,865 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663429387 after 1ms 2024-11-15T09:37:16,865 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663429387 2024-11-15T09:37:16,869 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731663429386/Put/vlen=1045/seqid=0] 2024-11-15T09:37:16,869 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731663431399/Put/vlen=1045/seqid=0] 
2024-11-15T09:37:16,869 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663429387 2024-11-15T09:37:16,869 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663434816 2024-11-15T09:37:16,869 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663434816 2024-11-15T09:37:16,869 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663434816 after 0ms 2024-11-15T09:37:16,869 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663434816 2024-11-15T09:37:16,873 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731663434815/Put/vlen=1045/seqid=0] 2024-11-15T09:37:16,873 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663436841 2024-11-15T09:37:16,874 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663436841 2024-11-15T09:37:16,874 WARN [IPC Server handler 3 on default port 44521 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663436841 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-15T09:37:16,874 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663436841 after 0ms 2024-11-15T09:37:16,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:16,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:17,435 WARN [ResponseProcessor for block BP-1629767906-172.17.0.2-1731663411931:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1629767906-172.17.0.2-1731663411931:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:17,435 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1524174815_22 at /127.0.0.1:53928 [Receiving block BP-1629767906-172.17.0.2-1731663411931:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:45161:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53928 dst: /127.0.0.1:45161 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:45161 remote=/127.0.0.1:53928]. Total timeout mills is 60000, 59413 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:37:17,435 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1524174815_22 at /127.0.0.1:57726 [Receiving block BP-1629767906-172.17.0.2-1731663411931:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:44421:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57726 dst: /127.0.0.1:44421 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:37:17,435 WARN [DataStreamer for file /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663436841 block BP-1629767906-172.17.0.2-1731663411931:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1629767906-172.17.0.2-1731663411931:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45161,DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31,DISK], DatanodeInfoWithStorage[127.0.0.1:44421,DS-b8a00768-30f5-4dea-aba1-44225fa7a624,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45161,DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31,DISK]) is bad. 
2024-11-15T09:37:17,436 WARN [DataStreamer for file /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663436841 block BP-1629767906-172.17.0.2-1731663411931:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1629767906-172.17.0.2-1731663411931:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:37:17,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741839_1022 (size=85) 2024-11-15T09:37:17,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:17,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:18,831 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663429387 after 4002ms 2024-11-15T09:37:18,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:18,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:19,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:19,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:20,278 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-15T09:37:20,875 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663436841 after 4001ms 2024-11-15T09:37:20,875 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663436841 2024-11-15T09:37:20,881 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663436841 2024-11-15T09:37:20,881 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-15T09:37:20,882 ERROR [FSHLog-0-hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce-prefix:791f12959b23,32911,1731663414789.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35197,DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:20,882 WARN [FSHLog-0-hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce-prefix:791f12959b23,32911,1731663414789.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35197,DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:20,882 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 791f12959b23%2C32911%2C1731663414789.meta:.meta(num 1731663415780) roll requested 2024-11-15T09:37:20,883 INFO [regionserver/791f12959b23:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C32911%2C1731663414789.meta.1731663440883.meta 2024-11-15T09:37:20,889 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:20,889 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:20,890 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:20,890 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:20,890 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:20,890 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.meta.1731663415780.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.meta.1731663440883.meta 2024-11-15T09:37:20,892 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35197,DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:20,892 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35197,DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:20,893 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.meta.1731663415780.meta 2024-11-15T09:37:20,893 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33899:33899),(127.0.0.1/127.0.0.1:36531:36531)] 2024-11-15T09:37:20,893 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.meta.1731663415780.meta is not closed yet, will try archiving it next time 2024-11-15T09:37:20,893 WARN [IPC Server handler 0 on default port 44521 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.meta.1731663415780.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1024 for block blk_1073741834_1013 2024-11-15T09:37:20,893 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.meta.1731663415780.meta after 0ms 2024-11-15T09:37:20,913 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740/.tmp/info/49f5167f5712404e87ad3e7d3ff0bcb1 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246./info:regioninfo/1731663416303/Put/seqid=0 2024-11-15T09:37:20,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741841_1025 (size=7125) 2024-11-15T09:37:20,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44421 is added to blk_1073741841_1025 (size=7125) 2024-11-15T09:37:20,922 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740/.tmp/info/49f5167f5712404e87ad3e7d3ff0bcb1 2024-11-15T09:37:20,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:20,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:37:20,944 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740/.tmp/ns/b4ec4fcf682c4e198a2fc30ed3727e29 is 43, key is default/ns:d/1731663415890/Put/seqid=0 2024-11-15T09:37:20,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741842_1026 (size=5153) 2024-11-15T09:37:20,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44421 is added to blk_1073741842_1026 (size=5153) 2024-11-15T09:37:20,949 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740/.tmp/ns/b4ec4fcf682c4e198a2fc30ed3727e29 2024-11-15T09:37:20,972 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740/.tmp/table/5f94d118e9e24612a95d30c1c1c57dfb is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731663416313/Put/seqid=0 2024-11-15T09:37:20,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741843_1027 (size=5438) 2024-11-15T09:37:20,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44421 is added to blk_1073741843_1027 (size=5438) 2024-11-15T09:37:20,977 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740/.tmp/table/5f94d118e9e24612a95d30c1c1c57dfb 2024-11-15T09:37:20,984 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740/.tmp/info/49f5167f5712404e87ad3e7d3ff0bcb1 as hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740/info/49f5167f5712404e87ad3e7d3ff0bcb1 2024-11-15T09:37:20,990 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740/info/49f5167f5712404e87ad3e7d3ff0bcb1, entries=10, sequenceid=11, filesize=7.0 K 2024-11-15T09:37:20,991 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740/.tmp/ns/b4ec4fcf682c4e198a2fc30ed3727e29 as hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740/ns/b4ec4fcf682c4e198a2fc30ed3727e29 2024-11-15T09:37:20,997 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740/ns/b4ec4fcf682c4e198a2fc30ed3727e29, entries=2, sequenceid=11, filesize=5.0 K 2024-11-15T09:37:20,999 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740/.tmp/table/5f94d118e9e24612a95d30c1c1c57dfb as hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740/table/5f94d118e9e24612a95d30c1c1c57dfb 2024-11-15T09:37:21,005 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740/table/5f94d118e9e24612a95d30c1c1c57dfb, entries=2, sequenceid=11, filesize=5.3 K 2024-11-15T09:37:21,006 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 125ms, sequenceid=11, compaction requested=false 2024-11-15T09:37:21,006 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-15T09:37:21,006 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing ddc261b21c33dbf278c691aaa8cf3246 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-15T09:37:21,007 ERROR [FSHLog-0-hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce-prefix:791f12959b23,32911,1731663414789 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1629767906-172.17.0.2-1731663411931:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:21,007 WARN [FSHLog-0-hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce-prefix:791f12959b23,32911,1731663414789 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1629767906-172.17.0.2-1731663411931:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:21,007 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 791f12959b23%2C32911%2C1731663414789:(num 1731663436841) roll requested 2024-11-15T09:37:21,008 INFO [regionserver/791f12959b23:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C32911%2C1731663414789.1731663441007 2024-11-15T09:37:21,013 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663436841 newFile=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663441007 2024-11-15T09:37:21,013 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:21,013 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:21,013 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:21,013 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:21,013 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:21,014 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663436841 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663441007 2024-11-15T09:37:21,014 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1629767906-172.17.0.2-1731663411931:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:21,014 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1629767906-172.17.0.2-1731663411931:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:37:21,014 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663436841 2024-11-15T09:37:21,015 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663436841 after 1ms 2024-11-15T09:37:21,016 DEBUG [regionserver/791f12959b23:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36531:36531),(127.0.0.1/127.0.0.1:33899:33899)] 2024-11-15T09:37:21,020 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.1731663436841 to hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/oldWALs/791f12959b23%2C32911%2C1731663414789.1731663436841 2024-11-15T09:37:21,034 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/default/TestLogRolling-testLogRollOnPipelineRestart/ddc261b21c33dbf278c691aaa8cf3246/.tmp/info/85319d9af1be40499cc5897c13746757 is 1080, key is row1002/info:/1731663425976/Put/seqid=0 2024-11-15T09:37:21,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741845_1029 (size=9270) 2024-11-15T09:37:21,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44421 is added to blk_1073741845_1029 (size=9270) 2024-11-15T09:37:21,040 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/default/TestLogRolling-testLogRollOnPipelineRestart/ddc261b21c33dbf278c691aaa8cf3246/.tmp/info/85319d9af1be40499cc5897c13746757 2024-11-15T09:37:21,046 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/default/TestLogRolling-testLogRollOnPipelineRestart/ddc261b21c33dbf278c691aaa8cf3246/.tmp/info/85319d9af1be40499cc5897c13746757 as hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/default/TestLogRolling-testLogRollOnPipelineRestart/ddc261b21c33dbf278c691aaa8cf3246/info/85319d9af1be40499cc5897c13746757 2024-11-15T09:37:21,053 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/default/TestLogRolling-testLogRollOnPipelineRestart/ddc261b21c33dbf278c691aaa8cf3246/info/85319d9af1be40499cc5897c13746757, entries=4, sequenceid=8, filesize=9.1 K 2024-11-15T09:37:21,054 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for ddc261b21c33dbf278c691aaa8cf3246 in 48ms, sequenceid=8, compaction requested=false 2024-11-15T09:37:21,054 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 
ddc261b21c33dbf278c691aaa8cf3246: 2024-11-15T09:37:21,059 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T09:37:21,060 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T09:37:21,060 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:37:21,060 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:37:21,060 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): 
Stopping rpc client 2024-11-15T09:37:21,060 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T09:37:21,060 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T09:37:21,060 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2099266181, stopped=false 2024-11-15T09:37:21,060 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=791f12959b23,35237,1731663414625 2024-11-15T09:37:21,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T09:37:21,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T09:37:21,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:37:21,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:37:21,132 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T09:37:21,132 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
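The call stack logged just above shows the shutdown being driven from AbstractTestLogRolling.tearDown through HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and then takes down the master, region server, ZooKeeper and mini DFS. A minimal sketch of that JUnit lifecycle follows; the class name and field layout are assumptions for illustration, not this test's actual code.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLifecycleSketch {
  // Utility instance name is an assumption for illustration only.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Starts a single-node mini cluster (one master, one region server), as in this log.
    testUtil.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Closes the cluster connection, then shuts down HBase, ZooKeeper and the mini DFS.
    testUtil.shutdownMiniCluster();
  }
}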
2024-11-15T09:37:21,132 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:37:21,132 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:37:21,132 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:37:21,133 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:37:21,133 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '791f12959b23,32911,1731663414789' ***** 2024-11-15T09:37:21,133 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T09:37:21,133 INFO [RS:0;791f12959b23:32911 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T09:37:21,134 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T09:37:21,134 INFO [RS:0;791f12959b23:32911 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T09:37:21,134 INFO [RS:0;791f12959b23:32911 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T09:37:21,134 INFO [RS:0;791f12959b23:32911 {}] regionserver.HRegionServer(3091): Received CLOSE for ddc261b21c33dbf278c691aaa8cf3246 2024-11-15T09:37:21,134 INFO [RS:0;791f12959b23:32911 {}] regionserver.HRegionServer(959): stopping server 791f12959b23,32911,1731663414789 2024-11-15T09:37:21,134 INFO [RS:0;791f12959b23:32911 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T09:37:21,135 INFO [RS:0;791f12959b23:32911 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;791f12959b23:32911. 2024-11-15T09:37:21,135 DEBUG [RS:0;791f12959b23:32911 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:37:21,135 DEBUG [RS:0;791f12959b23:32911 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:37:21,135 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ddc261b21c33dbf278c691aaa8cf3246, disabling compactions & flushes 2024-11-15T09:37:21,135 INFO [RS:0;791f12959b23:32911 {}] 
regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T09:37:21,135 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246. 2024-11-15T09:37:21,135 INFO [RS:0;791f12959b23:32911 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T09:37:21,135 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246. 2024-11-15T09:37:21,135 INFO [RS:0;791f12959b23:32911 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T09:37:21,135 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246. after waiting 0 ms 2024-11-15T09:37:21,135 INFO [RS:0;791f12959b23:32911 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T09:37:21,135 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246. 2024-11-15T09:37:21,135 INFO [RS:0;791f12959b23:32911 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-15T09:37:21,136 DEBUG [RS:0;791f12959b23:32911 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, ddc261b21c33dbf278c691aaa8cf3246=TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246.} 2024-11-15T09:37:21,136 DEBUG [RS:0;791f12959b23:32911 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ddc261b21c33dbf278c691aaa8cf3246 2024-11-15T09:37:21,136 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T09:37:21,136 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T09:37:21,136 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T09:37:21,136 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T09:37:21,136 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T09:37:21,142 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/default/TestLogRolling-testLogRollOnPipelineRestart/ddc261b21c33dbf278c691aaa8cf3246/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-15T09:37:21,143 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246. 2024-11-15T09:37:21,143 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ddc261b21c33dbf278c691aaa8cf3246: Waiting for close lock at 1731663441135Running coprocessor pre-close hooks at 1731663441135Disabling compacts and flushes for region at 1731663441135Disabling writes for close at 1731663441135Writing region close event to WAL at 1731663441136 (+1 ms)Running coprocessor post-close hooks at 1731663441143 (+7 ms)Closed at 1731663441143 2024-11-15T09:37:21,143 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731663415934.ddc261b21c33dbf278c691aaa8cf3246. 2024-11-15T09:37:21,144 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-15T09:37:21,144 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T09:37:21,144 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T09:37:21,144 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731663441136Running coprocessor pre-close hooks at 1731663441136Disabling compacts and flushes for region at 1731663441136Disabling writes for close at 1731663441136Writing region close event to WAL at 1731663441139 (+3 ms)Running coprocessor post-close hooks at 1731663441144 (+5 ms)Closed at 1731663441144 2024-11-15T09:37:21,144 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T09:37:21,281 INFO [regionserver/791f12959b23:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T09:37:21,295 INFO [regionserver/791f12959b23:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-15T09:37:21,295 INFO [regionserver/791f12959b23:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-15T09:37:21,336 INFO [RS:0;791f12959b23:32911 {}] regionserver.HRegionServer(976): stopping server 791f12959b23,32911,1731663414789; all regions closed. 
2024-11-15T09:37:21,336 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:21,336 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:21,336 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:21,337 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:21,337 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:21,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44421 is added to blk_1073741840_1023 (size=825) 2024-11-15T09:37:21,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741840_1023 (size=825) 2024-11-15T09:37:21,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:21,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:22,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:22,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:23,432 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-15T09:37:23,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:23,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:24,606 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
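The FsDatasetAsyncDiskServiceFixer message above is the test utility noticing, via reflection, that the private threadGroup field it normally patches no longer exists on newer Hadoop releases (HBASE-27595) and carrying on regardless. A generic sketch of that defensive-reflection pattern; the class and method names here are illustrative, not HBase's own fixer.

import java.lang.reflect.Field;

final class ThreadGroupProbeSketch {
  // Returns the service's private "threadGroup" field if present, or null when the
  // field has been removed or renamed (e.g. Hadoop newer than 3.2.3 / 3.3.4).
  static ThreadGroup tryGetThreadGroup(Object asyncDiskService) {
    try {
      Field f = asyncDiskService.getClass().getDeclaredField("threadGroup");
      f.setAccessible(true);
      return (ThreadGroup) f.get(asyncDiskService);
    } catch (NoSuchFieldException | IllegalAccessException e) {
      return null; // tolerate the missing field instead of failing the test
    }
  }
}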
2024-11-15T09:37:24,894 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.meta.1731663415780.meta after 4001ms 2024-11-15T09:37:24,895 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/WALs/791f12959b23,32911,1731663414789/791f12959b23%2C32911%2C1731663414789.meta.1731663415780.meta to hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/oldWALs/791f12959b23%2C32911%2C1731663414789.meta.1731663415780.meta 2024-11-15T09:37:24,898 DEBUG [RS:0;791f12959b23:32911 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/oldWALs 2024-11-15T09:37:24,898 INFO [RS:0;791f12959b23:32911 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 791f12959b23%2C32911%2C1731663414789.meta:.meta(num 1731663440883) 2024-11-15T09:37:24,899 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:24,899 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:24,899 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:24,899 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:24,899 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:24,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741844_1028 (size=1162) 2024-11-15T09:37:24,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44421 is added to blk_1073741844_1028 (size=1162) 2024-11-15T09:37:24,906 DEBUG [RS:0;791f12959b23:32911 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/oldWALs 2024-11-15T09:37:24,906 INFO [RS:0;791f12959b23:32911 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 791f12959b23%2C32911%2C1731663414789:(num 1731663441007) 2024-11-15T09:37:24,907 DEBUG [RS:0;791f12959b23:32911 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:37:24,907 INFO [RS:0;791f12959b23:32911 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T09:37:24,907 INFO [RS:0;791f12959b23:32911 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T09:37:24,907 INFO [RS:0;791f12959b23:32911 {}] hbase.ChoreService(370): Chore service for: regionserver/791f12959b23:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-15T09:37:24,907 INFO [RS:0;791f12959b23:32911 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T09:37:24,907 INFO [RS:0;791f12959b23:32911 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32911 2024-11-15T09:37:24,907 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T09:37:24,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:24,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:24,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/791f12959b23,32911,1731663414789 2024-11-15T09:37:24,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T09:37:24,952 INFO [RS:0;791f12959b23:32911 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T09:37:24,962 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [791f12959b23,32911,1731663414789] 2024-11-15T09:37:24,973 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/791f12959b23,32911,1731663414789 already deleted, retry=false 2024-11-15T09:37:24,973 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 791f12959b23,32911,1731663414789 expired; onlineServers=0 2024-11-15T09:37:24,973 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '791f12959b23,35237,1731663414625' ***** 2024-11-15T09:37:24,973 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T09:37:24,973 INFO [M:0;791f12959b23:35237 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T09:37:24,973 INFO [M:0;791f12959b23:35237 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T09:37:24,973 DEBUG [M:0;791f12959b23:35237 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T09:37:24,973 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
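The ZooKeeper events above are the master's side of region server expiration: the ephemeral znode under /hbase/rs vanishes when the server's session ends, and RegionServerTracker treats the NodeDeleted event as the trigger for expiration handling. A bare ZooKeeper-client sketch of that watch pattern; the path handling and logging are illustrative, not HBase's RegionServerTracker code.

import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

final class RsZNodeWatchSketch {
  // Registers a one-shot watch on the region server's ephemeral znode and reports its
  // deletion; a real tracker would re-register the watch and start expiration processing.
  static void watchRegionServer(ZooKeeper zk, String rsZNode) throws Exception {
    zk.exists(rsZNode, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        System.out.println("RegionServer znode deleted: " + event.getPath());
      }
    });
  }
}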
2024-11-15T09:37:24,973 DEBUG [M:0;791f12959b23:35237 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T09:37:24,973 DEBUG [master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663415143 {}] cleaner.HFileCleaner(306): Exit Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663415143,5,FailOnTimeoutGroup] 2024-11-15T09:37:24,973 DEBUG [master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663415143 {}] cleaner.HFileCleaner(306): Exit Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663415143,5,FailOnTimeoutGroup] 2024-11-15T09:37:24,973 INFO [M:0;791f12959b23:35237 {}] hbase.ChoreService(370): Chore service for: master/791f12959b23:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T09:37:24,974 INFO [M:0;791f12959b23:35237 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T09:37:24,974 DEBUG [M:0;791f12959b23:35237 {}] master.HMaster(1795): Stopping service threads 2024-11-15T09:37:24,974 INFO [M:0;791f12959b23:35237 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T09:37:24,974 INFO [M:0;791f12959b23:35237 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T09:37:24,974 INFO [M:0;791f12959b23:35237 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T09:37:24,974 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T09:37:24,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T09:37:24,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:37:24,984 DEBUG [M:0;791f12959b23:35237 {}] zookeeper.ZKUtil(347): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T09:37:24,984 WARN [M:0;791f12959b23:35237 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T09:37:24,985 INFO [M:0;791f12959b23:35237 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/.lastflushedseqids 2024-11-15T09:37:24,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44421 is added to blk_1073741846_1030 (size=111) 2024-11-15T09:37:24,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741846_1030 (size=111) 2024-11-15T09:37:24,992 INFO [M:0;791f12959b23:35237 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T09:37:24,992 INFO [M:0;791f12959b23:35237 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T09:37:24,992 DEBUG [M:0;791f12959b23:35237 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T09:37:24,992 INFO [M:0;791f12959b23:35237 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:37:24,992 DEBUG [M:0;791f12959b23:35237 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:37:24,992 DEBUG [M:0;791f12959b23:35237 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T09:37:24,992 DEBUG [M:0;791f12959b23:35237 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:37:24,992 INFO [M:0;791f12959b23:35237 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-11-15T09:37:24,993 ERROR [FSHLog-0-hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData-prefix:791f12959b23,35237,1731663414625 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35197,DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:24,993 WARN [FSHLog-0-hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData-prefix:791f12959b23,35237,1731663414625 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35197,DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
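The two "All datanodes ... are bad. Aborting..." traces above are DataStreamer giving up on pipeline recovery for the master's WAL: the pipeline was down to the single datanode named in the message, and with nothing healthy left there is no way to rebuild it, so the roll that follows opens a fresh writer instead. For reference only, the HDFS client behaviour in this situation is governed by the replace-datanode-on-failure settings; whether tuning them would be appropriate here is an assumption, the sketch just shows the knobs.

import org.apache.hadoop.conf.Configuration;

final class PipelineRecoveryConfSketch {
  // Client-side settings that decide whether a failed datanode must be replaced
  // before a write can continue. The chosen values are illustrative.
  static Configuration relaxedReplacementPolicy() {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // NEVER = keep writing on the surviving datanodes instead of requiring a replacement.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
    return conf;
  }
}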
2024-11-15T09:37:24,993 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 791f12959b23%2C35237%2C1731663414625:(num 1731663414922) roll requested 2024-11-15T09:37:24,993 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C35237%2C1731663414625.1731663444993 2024-11-15T09:37:25,000 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:25,000 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:25,000 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:25,000 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:25,000 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:37:25,000 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/WALs/791f12959b23,35237,1731663414625/791f12959b23%2C35237%2C1731663414625.1731663414922 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/WALs/791f12959b23,35237,1731663414625/791f12959b23%2C35237%2C1731663414625.1731663444993 2024-11-15T09:37:25,001 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35197,DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T09:37:25,001 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35197,DS-be48a9a9-1aba-41d2-87b5-51d656ad3f31,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T09:37:25,001 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/WALs/791f12959b23,35237,1731663414625/791f12959b23%2C35237%2C1731663414625.1731663414922
2024-11-15T09:37:25,001 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33899:33899),(127.0.0.1/127.0.0.1:36531:36531)]
2024-11-15T09:37:25,002 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/WALs/791f12959b23,35237,1731663414625/791f12959b23%2C35237%2C1731663414625.1731663414922 is not closed yet, will try archiving it next time
2024-11-15T09:37:25,002 WARN [IPC Server handler 3 on default port 44521 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/WALs/791f12959b23,35237,1731663414625/791f12959b23%2C35237%2C1731663414625.1731663414922 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015
2024-11-15T09:37:25,002 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/WALs/791f12959b23,35237,1731663414625/791f12959b23%2C35237%2C1731663414625.1731663414922 after 1ms
2024-11-15T09:37:25,018 DEBUG [M:0;791f12959b23:35237 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1ee912b6748d4f76bf8efe37e2d1c16a is 82, key is hbase:meta,,1/info:regioninfo/1731663415809/Put/seqid=0
2024-11-15T09:37:25,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44421 is added to blk_1073741848_1033 (size=5672)
2024-11-15T09:37:25,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741848_1033 (size=5672)
2024-11-15T09:37:25,024 INFO [M:0;791f12959b23:35237 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1ee912b6748d4f76bf8efe37e2d1c16a
2024-11-15T09:37:25,045 DEBUG [M:0;791f12959b23:35237 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8f452c2ae4944878a3a8dba5ce2c9889 is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731663416319/Put/seqid=0
2024-11-15T09:37:25,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741849_1034 (size=6117)
2024-11-15T09:37:25,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44421 is added to blk_1073741849_1034 (size=6117)
2024-11-15T09:37:25,051 INFO [M:0;791f12959b23:35237 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true),
to=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8f452c2ae4944878a3a8dba5ce2c9889 2024-11-15T09:37:25,063 INFO [RS:0;791f12959b23:32911 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T09:37:25,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:37:25,063 INFO [RS:0;791f12959b23:32911 {}] regionserver.HRegionServer(1031): Exiting; stopping=791f12959b23,32911,1731663414789; zookeeper connection closed. 2024-11-15T09:37:25,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32911-0x1013ddac0fc0001, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:37:25,063 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@35f854f3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@35f854f3 2024-11-15T09:37:25,063 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T09:37:25,071 DEBUG [M:0;791f12959b23:35237 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aec5f23c863241cb94453083715e3939 is 69, key is 791f12959b23,32911,1731663414789/rs:state/1731663415251/Put/seqid=0 2024-11-15T09:37:25,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44421 is added to blk_1073741850_1035 (size=5156) 2024-11-15T09:37:25,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741850_1035 (size=5156) 2024-11-15T09:37:25,078 INFO [M:0;791f12959b23:35237 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aec5f23c863241cb94453083715e3939 2024-11-15T09:37:25,105 DEBUG [M:0;791f12959b23:35237 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8a5da4c3eb8b4939b8e3050105aac9f7 is 52, key is load_balancer_on/state:d/1731663415928/Put/seqid=0 2024-11-15T09:37:25,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741851_1036 (size=5056) 2024-11-15T09:37:25,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44421 is added to blk_1073741851_1036 (size=5056) 2024-11-15T09:37:25,110 INFO [M:0;791f12959b23:35237 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8a5da4c3eb8b4939b8e3050105aac9f7 2024-11-15T09:37:25,117 DEBUG [M:0;791f12959b23:35237 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1ee912b6748d4f76bf8efe37e2d1c16a as hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1ee912b6748d4f76bf8efe37e2d1c16a 2024-11-15T09:37:25,123 INFO [M:0;791f12959b23:35237 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1ee912b6748d4f76bf8efe37e2d1c16a, entries=8, sequenceid=56, filesize=5.5 K 2024-11-15T09:37:25,124 DEBUG [M:0;791f12959b23:35237 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8f452c2ae4944878a3a8dba5ce2c9889 as hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8f452c2ae4944878a3a8dba5ce2c9889 2024-11-15T09:37:25,131 INFO [M:0;791f12959b23:35237 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8f452c2ae4944878a3a8dba5ce2c9889, entries=6, sequenceid=56, filesize=6.0 K 2024-11-15T09:37:25,132 DEBUG [M:0;791f12959b23:35237 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aec5f23c863241cb94453083715e3939 as hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/aec5f23c863241cb94453083715e3939 2024-11-15T09:37:25,138 INFO [M:0;791f12959b23:35237 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/aec5f23c863241cb94453083715e3939, entries=1, sequenceid=56, filesize=5.0 K 2024-11-15T09:37:25,139 DEBUG [M:0;791f12959b23:35237 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8a5da4c3eb8b4939b8e3050105aac9f7 as hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8a5da4c3eb8b4939b8e3050105aac9f7 2024-11-15T09:37:25,145 INFO [M:0;791f12959b23:35237 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8a5da4c3eb8b4939b8e3050105aac9f7, entries=1, sequenceid=56, filesize=4.9 K 2024-11-15T09:37:25,146 INFO [M:0;791f12959b23:35237 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 154ms, sequenceid=56, compaction requested=false 2024-11-15T09:37:25,147 INFO [M:0;791f12959b23:35237 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T09:37:25,147 DEBUG [M:0;791f12959b23:35237 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731663444992
Disabling compacts and flushes for region at 1731663444992
Disabling writes for close at 1731663444992
Obtaining lock to block concurrent updates at 1731663444992
Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731663444992
Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1731663444993 (+1 ms)
Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731663445003 (+10 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731663445004 (+1 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731663445017 (+13 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731663445017
Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731663445029 (+12 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731663445044 (+15 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731663445044
Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731663445056 (+12 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731663445071 (+15 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731663445071
Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731663445084 (+13 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731663445104 (+20 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731663445104
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@396b2b01: reopening flushed file at 1731663445116 (+12 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b95ba9c: reopening flushed file at 1731663445123 (+7 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@bdc62e1: reopening flushed file at 1731663445131 (+8 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a5fba78: reopening flushed file at 1731663445138 (+7 ms)
Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 154ms, sequenceid=56, compaction requested=false at 1731663445146 (+8 ms)
Writing region close event to WAL at 1731663445147 (+1 ms)
Closed at 1731663445147
2024-11-15T09:37:25,147 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T09:37:25,147 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T09:37:25,148 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T09:37:25,148 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T09:37:25,148 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T09:37:25,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44421 is added to blk_1073741847_1031 (size=757)
2024-11-15T09:37:25,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45161 is added to blk_1073741847_1031 (size=757)
2024-11-15T09:37:25,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for
hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:25,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:26,143 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,143 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,163 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,172 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,433 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-15T09:37:26,681 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T09:37:26,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,683 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,683 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,683 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,710 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,710 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,710 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:26,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:26,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:27,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:27,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:28,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:28,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:29,003 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/WALs/791f12959b23,35237,1731663414625/791f12959b23%2C35237%2C1731663414625.1731663414922 after 4002ms 2024-11-15T09:37:29,003 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/WALs/791f12959b23,35237,1731663414625/791f12959b23%2C35237%2C1731663414625.1731663414922 to hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/oldWALs/791f12959b23%2C35237%2C1731663414625.1731663414922 2024-11-15T09:37:29,005 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/MasterData/oldWALs/791f12959b23%2C35237%2C1731663414625.1731663414922 to hdfs://localhost:44521/user/jenkins/test-data/c858737a-f05a-7156-7d97-92c19eb26cce/oldWALs/791f12959b23%2C35237%2C1731663414625.1731663414922$masterlocalwal$ 2024-11-15T09:37:29,006 INFO [M:0;791f12959b23:35237 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-15T09:37:29,006 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T09:37:29,006 INFO [M:0;791f12959b23:35237 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35237
2024-11-15T09:37:29,006 INFO [M:0;791f12959b23:35237 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-15T09:37:29,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-15T09:37:29,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35237-0x1013ddac0fc0000, quorum=127.0.0.1:62135, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-15T09:37:29,168 INFO [M:0;791f12959b23:35237 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-15T09:37:29,170 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7fc558ce{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-15T09:37:29,171 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c39138a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-15T09:37:29,171 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-15T09:37:29,171 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7f78a2f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-15T09:37:29,171 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6023e2fd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/hadoop.log.dir/,STOPPED}
2024-11-15T09:37:29,176 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-15T09:37:29,176 WARN [BP-1629767906-172.17.0.2-1731663411931 heartbeating to localhost/127.0.0.1:44521 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:37:29,176 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:37:29,176 WARN [BP-1629767906-172.17.0.2-1731663411931 heartbeating to localhost/127.0.0.1:44521 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1629767906-172.17.0.2-1731663411931 (Datanode Uuid 399051cd-ea78-4153-9b2b-29d93c63f800) service to localhost/127.0.0.1:44521 2024-11-15T09:37:29,177 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178/data/data3/current/BP-1629767906-172.17.0.2-1731663411931 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:37:29,177 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178/data/data4/current/BP-1629767906-172.17.0.2-1731663411931 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:37:29,177 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:37:29,179 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d237fac{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:37:29,180 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@19016e01{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:37:29,180 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:37:29,180 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e4fdbc6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:37:29,180 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fbd08cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/hadoop.log.dir/,STOPPED} 2024-11-15T09:37:29,181 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T09:37:29,181 WARN [BP-1629767906-172.17.0.2-1731663411931 heartbeating to localhost/127.0.0.1:44521 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:37:29,181 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:37:29,181 WARN [BP-1629767906-172.17.0.2-1731663411931 heartbeating to localhost/127.0.0.1:44521 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1629767906-172.17.0.2-1731663411931 (Datanode Uuid 45e2ab00-e51e-42de-8686-dac1897dbe9a) service to localhost/127.0.0.1:44521 2024-11-15T09:37:29,182 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178/data/data1/current/BP-1629767906-172.17.0.2-1731663411931 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:37:29,182 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/cluster_32746204-db61-aa48-f097-763dde13f178/data/data2/current/BP-1629767906-172.17.0.2-1731663411931 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:37:29,182 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:37:29,187 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1da660ce{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T09:37:29,187 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@25a29a07{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:37:29,187 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:37:29,188 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6bfebe40{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:37:29,188 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d9c9e99{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/hadoop.log.dir/,STOPPED} 2024-11-15T09:37:29,195 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T09:37:29,214 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T09:37:29,224 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44521 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44521 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44521 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44521 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:44521 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44521 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44521 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44521 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=291 (was 167) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3244 (was 3450) 2024-11-15T09:37:29,232 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=291, ProcessCount=11, AvailableMemoryMB=3244 2024-11-15T09:37:29,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T09:37:29,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/hadoop.log.dir so I do NOT create it in target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49 2024-11-15T09:37:29,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5227efa2-eb01-4c85-506c-15dd4c19c127/hadoop.tmp.dir so I do NOT create it in target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49 2024-11-15T09:37:29,233 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/cluster_f6288a7c-0531-088b-9fd8-3c975b07d6df, deleteOnExit=true 2024-11-15T09:37:29,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T09:37:29,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/test.cache.data in system properties and HBase conf 2024-11-15T09:37:29,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T09:37:29,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/hadoop.log.dir in system properties and HBase conf 2024-11-15T09:37:29,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T09:37:29,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T09:37:29,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T09:37:29,234 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-15T09:37:29,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T09:37:29,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T09:37:29,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T09:37:29,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T09:37:29,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T09:37:29,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T09:37:29,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T09:37:29,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T09:37:29,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T09:37:29,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/nfs.dump.dir in system properties and HBase conf 2024-11-15T09:37:29,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/java.io.tmpdir in system properties and HBase conf 2024-11-15T09:37:29,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T09:37:29,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T09:37:29,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T09:37:29,252 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T09:37:29,741 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:37:29,745 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:37:29,757 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:37:29,757 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:37:29,757 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:37:29,758 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:37:29,758 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@293e66d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:37:29,758 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ac76b28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:37:29,865 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d483d07{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/java.io.tmpdir/jetty-localhost-39733-hadoop-hdfs-3_4_1-tests_jar-_-any-16154459128715900637/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T09:37:29,865 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1106c0e7{HTTP/1.1, (http/1.1)}{localhost:39733} 2024-11-15T09:37:29,865 INFO [Time-limited test {}] server.Server(415): Started @196299ms 2024-11-15T09:37:29,879 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T09:37:29,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:29,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:30,114 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:37:30,117 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:37:30,117 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:37:30,117 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:37:30,118 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:37:30,120 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2cfed7b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:37:30,121 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@439cbd97{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:37:30,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43d16ee8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/java.io.tmpdir/jetty-localhost-33089-hadoop-hdfs-3_4_1-tests_jar-_-any-1472163039653716821/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:37:30,227 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5b135886{HTTP/1.1, (http/1.1)}{localhost:33089} 2024-11-15T09:37:30,227 INFO [Time-limited test {}] server.Server(415): Started @196660ms 2024-11-15T09:37:30,227 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T09:37:30,227 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T09:37:30,228 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T09:37:30,228 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-15T09:37:30,228 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:37:30,257 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:37:30,260 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:37:30,261 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:37:30,261 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:37:30,261 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:37:30,262 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10c2896a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:37:30,262 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4bd1d692{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:37:30,361 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2526c219{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/java.io.tmpdir/jetty-localhost-46401-hadoop-hdfs-3_4_1-tests_jar-_-any-16922146061196547582/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:37:30,361 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@74e6f5d9{HTTP/1.1, (http/1.1)}{localhost:46401} 2024-11-15T09:37:30,361 INFO [Time-limited test {}] server.Server(415): Started @196794ms 2024-11-15T09:37:30,362 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:37:30,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:30,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:37:31,203 WARN [Thread-1647 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/cluster_f6288a7c-0531-088b-9fd8-3c975b07d6df/data/data1/current/BP-783467009-172.17.0.2-1731663449263/current, will proceed with Du for space computation calculation, 2024-11-15T09:37:31,203 WARN [Thread-1648 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/cluster_f6288a7c-0531-088b-9fd8-3c975b07d6df/data/data2/current/BP-783467009-172.17.0.2-1731663449263/current, will proceed with Du for space computation calculation, 2024-11-15T09:37:31,225 WARN [Thread-1611 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T09:37:31,227 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x417d1e51ef8c0529 with lease ID 0x8053d33055710cfc: Processing first storage report for DS-0550cab5-80a7-4a6d-a394-b5de7b45ea93 from datanode DatanodeRegistration(127.0.0.1:32991, datanodeUuid=fe1cdc2e-f1d4-44d3-9d3a-0ac1e13bd12c, infoPort=46803, infoSecurePort=0, ipcPort=43627, storageInfo=lv=-57;cid=testClusterID;nsid=2051598300;c=1731663449263) 2024-11-15T09:37:31,227 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x417d1e51ef8c0529 with lease ID 0x8053d33055710cfc: from storage DS-0550cab5-80a7-4a6d-a394-b5de7b45ea93 node DatanodeRegistration(127.0.0.1:32991, datanodeUuid=fe1cdc2e-f1d4-44d3-9d3a-0ac1e13bd12c, infoPort=46803, infoSecurePort=0, ipcPort=43627, storageInfo=lv=-57;cid=testClusterID;nsid=2051598300;c=1731663449263), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:37:31,227 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x417d1e51ef8c0529 with lease ID 0x8053d33055710cfc: Processing first storage report for DS-1811737d-2b27-4c30-bdca-e3d68399719b from datanode DatanodeRegistration(127.0.0.1:32991, datanodeUuid=fe1cdc2e-f1d4-44d3-9d3a-0ac1e13bd12c, infoPort=46803, infoSecurePort=0, ipcPort=43627, storageInfo=lv=-57;cid=testClusterID;nsid=2051598300;c=1731663449263) 2024-11-15T09:37:31,227 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x417d1e51ef8c0529 with lease ID 0x8053d33055710cfc: from storage DS-1811737d-2b27-4c30-bdca-e3d68399719b node DatanodeRegistration(127.0.0.1:32991, datanodeUuid=fe1cdc2e-f1d4-44d3-9d3a-0ac1e13bd12c, infoPort=46803, infoSecurePort=0, ipcPort=43627, storageInfo=lv=-57;cid=testClusterID;nsid=2051598300;c=1731663449263), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:37:31,342 WARN [Thread-1658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/cluster_f6288a7c-0531-088b-9fd8-3c975b07d6df/data/data3/current/BP-783467009-172.17.0.2-1731663449263/current, will proceed with Du for space computation calculation, 2024-11-15T09:37:31,342 WARN [Thread-1659 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/cluster_f6288a7c-0531-088b-9fd8-3c975b07d6df/data/data4/current/BP-783467009-172.17.0.2-1731663449263/current, will proceed with Du for space computation calculation, 2024-11-15T09:37:31,363 WARN [Thread-1634 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T09:37:31,365 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa9ca4d1477d16289 with lease ID 0x8053d33055710cfd: Processing first storage report for DS-5b509dae-0bca-49ca-ad43-6daa86f3f6e0 from datanode DatanodeRegistration(127.0.0.1:39609, datanodeUuid=d9041e75-1b50-49ef-a0a4-5f0e36569664, infoPort=45865, infoSecurePort=0, ipcPort=35429, storageInfo=lv=-57;cid=testClusterID;nsid=2051598300;c=1731663449263) 2024-11-15T09:37:31,365 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa9ca4d1477d16289 with lease ID 0x8053d33055710cfd: from storage DS-5b509dae-0bca-49ca-ad43-6daa86f3f6e0 node DatanodeRegistration(127.0.0.1:39609, datanodeUuid=d9041e75-1b50-49ef-a0a4-5f0e36569664, infoPort=45865, infoSecurePort=0, ipcPort=35429, storageInfo=lv=-57;cid=testClusterID;nsid=2051598300;c=1731663449263), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:37:31,365 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa9ca4d1477d16289 with lease ID 0x8053d33055710cfd: Processing first storage report for DS-3ee3c72d-ad06-4ac9-9199-9f8c9c010373 from datanode DatanodeRegistration(127.0.0.1:39609, datanodeUuid=d9041e75-1b50-49ef-a0a4-5f0e36569664, infoPort=45865, infoSecurePort=0, ipcPort=35429, storageInfo=lv=-57;cid=testClusterID;nsid=2051598300;c=1731663449263) 2024-11-15T09:37:31,365 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa9ca4d1477d16289 with lease ID 0x8053d33055710cfd: from storage DS-3ee3c72d-ad06-4ac9-9199-9f8c9c010373 node DatanodeRegistration(127.0.0.1:39609, datanodeUuid=d9041e75-1b50-49ef-a0a4-5f0e36569664, infoPort=45865, infoSecurePort=0, ipcPort=35429, storageInfo=lv=-57;cid=testClusterID;nsid=2051598300;c=1731663449263), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:37:31,391 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49 2024-11-15T09:37:31,395 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/cluster_f6288a7c-0531-088b-9fd8-3c975b07d6df/zookeeper_0, clientPort=61704, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/cluster_f6288a7c-0531-088b-9fd8-3c975b07d6df/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/cluster_f6288a7c-0531-088b-9fd8-3c975b07d6df/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T09:37:31,396 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61704 2024-11-15T09:37:31,397 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:37:31,399 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:37:31,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741825_1001 (size=7) 2024-11-15T09:37:31,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741825_1001 (size=7) 2024-11-15T09:37:31,410 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6 with version=8 2024-11-15T09:37:31,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/hbase-staging 2024-11-15T09:37:31,413 INFO [Time-limited test {}] client.ConnectionUtils(128): master/791f12959b23:0 server-side Connection retries=45 2024-11-15T09:37:31,413 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:37:31,413 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T09:37:31,413 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T09:37:31,413 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:37:31,413 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T09:37:31,413 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T09:37:31,413 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T09:37:31,414 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36195 2024-11-15T09:37:31,416 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36195 connecting to ZooKeeper ensemble=127.0.0.1:61704 2024-11-15T09:37:31,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:361950x0, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-15T09:37:31,468 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36195-0x1013ddb50ad0000 connected 2024-11-15T09:37:31,573 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:37:31,575 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:37:31,577 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:37:31,577 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6, hbase.cluster.distributed=false 2024-11-15T09:37:31,579 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T09:37:31,580 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36195 2024-11-15T09:37:31,580 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36195 2024-11-15T09:37:31,580 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36195 2024-11-15T09:37:31,581 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36195 2024-11-15T09:37:31,581 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36195 2024-11-15T09:37:31,600 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/791f12959b23:0 server-side Connection retries=45 2024-11-15T09:37:31,600 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:37:31,600 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T09:37:31,600 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T09:37:31,600 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:37:31,600 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T09:37:31,600 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T09:37:31,600 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T09:37:31,601 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37281 2024-11-15T09:37:31,603 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37281 connecting to ZooKeeper ensemble=127.0.0.1:61704 2024-11-15T09:37:31,603 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:37:31,605 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:37:31,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:372810x0, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T09:37:31,615 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:372810x0, quorum=127.0.0.1:61704, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:37:31,615 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37281-0x1013ddb50ad0001 connected 2024-11-15T09:37:31,615 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T09:37:31,616 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T09:37:31,616 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T09:37:31,617 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T09:37:31,618 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37281 2024-11-15T09:37:31,618 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37281 2024-11-15T09:37:31,618 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37281 2024-11-15T09:37:31,618 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37281 2024-11-15T09:37:31,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37281 2024-11-15T09:37:31,632 DEBUG [M:0;791f12959b23:36195 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;791f12959b23:36195 2024-11-15T09:37:31,632 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/791f12959b23,36195,1731663451412 2024-11-15T09:37:31,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-11-15T09:37:31,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:37:31,636 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/791f12959b23,36195,1731663451412 2024-11-15T09:37:31,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:37:31,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T09:37:31,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:37:31,647 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T09:37:31,647 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/791f12959b23,36195,1731663451412 from backup master directory 2024-11-15T09:37:31,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:37:31,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/791f12959b23,36195,1731663451412 2024-11-15T09:37:31,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:37:31,657 WARN [master/791f12959b23:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
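The ZKUtil/ZKWatcher entries above repeatedly log "Set watcher on znode that does not yet exist" and then receive NodeCreated/NodeChildrenChanged/NodeDeleted events. A minimal sketch of that mechanism with the stock Apache ZooKeeper client is shown below; it is not HBase's own ZKUtil/ZKWatcher code. The ensemble address 127.0.0.1:61704 and the znode paths come from the log; the class name, session timeout and sleep are illustrative assumptions.

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Ensemble address taken from the log above; adjust for your environment.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:61704", 30_000, event -> { });

    Watcher watcher = (WatchedEvent event) ->
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", path=" + event.getPath());

    // exists() registers the watch even when the znode is absent, which is why the
    // log can report setting a watcher on a znode that does not yet exist. The watch
    // fires once (e.g. NodeCreated or NodeDeleted) and must then be re-registered.
    zk.exists("/hbase/master", watcher);
    zk.exists("/hbase/running", watcher);

    Thread.sleep(60_000); // keep the client alive long enough to observe events
    zk.close();
  }
}
```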
2024-11-15T09:37:31,657 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=791f12959b23,36195,1731663451412
2024-11-15T09:37:31,662 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/hbase.id] with ID: 7ab5f1e1-2290-4247-ac1d-8ad579a60f7c
2024-11-15T09:37:31,662 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/.tmp/hbase.id
2024-11-15T09:37:31,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741826_1002 (size=42)
2024-11-15T09:37:31,675 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/.tmp/hbase.id]:[hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/hbase.id]
2024-11-15T09:37:31,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741826_1002 (size=42)
2024-11-15T09:37:31,687 INFO [master/791f12959b23:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-15T09:37:31,687 INFO [master/791f12959b23:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-15T09:37:31,689 INFO [master/791f12959b23:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms.
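The util.FSUtils(625)/(634) entries above describe publishing the cluster ID by writing .tmp/hbase.id first and then moving it to hbase.id. Below is a rough sketch of that write-then-rename pattern on a Hadoop FileSystem, reusing the paths and ID printed in the log; it is not the actual FSUtils implementation, only the idea those two lines describe, and the class name is made up.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Root directory and file names taken from the log; any HDFS or local path works.
    Path root = new Path("hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6");
    Path tmp = new Path(root, ".tmp/hbase.id");
    Path target = new Path(root, "hbase.id");

    FileSystem fs = root.getFileSystem(conf);

    // 1. Write the content to a temporary location first ...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("7ab5f1e1-2290-4247-ac1d-8ad579a60f7c".getBytes(StandardCharsets.UTF_8));
    }

    // 2. ... then move it to the final name, so readers never observe a half-written file.
    if (!fs.rename(tmp, target)) {
      throw new IOException("Failed to move " + tmp + " to " + target);
    }
  }
}
```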
2024-11-15T09:37:31,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:37:31,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:37:31,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741827_1003 (size=196) 2024-11-15T09:37:31,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741827_1003 (size=196) 2024-11-15T09:37:31,711 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T09:37:31,712 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T09:37:31,716 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:37:31,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741828_1004 (size=1189) 2024-11-15T09:37:31,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741828_1004 (size=1189) 2024-11-15T09:37:31,725 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store 2024-11-15T09:37:31,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741829_1005 (size=34) 2024-11-15T09:37:31,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741829_1005 (size=34) 2024-11-15T09:37:31,733 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:37:31,733 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T09:37:31,733 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:37:31,733 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:37:31,734 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T09:37:31,734 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:37:31,734 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
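The 'master:store' descriptor logged above (an in-memory 'info' family with ROW_INDEX_V1 encoding, ROWCOL bloom filter and 8 KB blocks, plus 'proc', 'rs' and 'state' families) can be approximated with the public client API as sketched below. This is only an illustration of the attributes shown in the log, not the code MasterRegion uses internally to build its local store.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
        public static TableDescriptor build() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
                    // 'info': 3 versions, ROWCOL bloom, ROW_INDEX_V1 encoding, 8 KB blocks, in-memory,
                    // matching the attributes in the descriptor logged above.
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                            .setMaxVersions(3)
                            .setBloomFilterType(BloomType.ROWCOL)
                            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                            .setBlocksize(8 * 1024)
                            .setInMemory(true)
                            .build())
                    // 'proc', 'rs' and 'state' keep the defaults shown in the log (1 version, ROW bloom).
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
                    .build();
        }
    }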
2024-11-15T09:37:31,734 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731663451733Disabling compacts and flushes for region at 1731663451733Disabling writes for close at 1731663451734 (+1 ms)Writing region close event to WAL at 1731663451734Closed at 1731663451734 2024-11-15T09:37:31,734 WARN [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/.initializing 2024-11-15T09:37:31,734 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/WALs/791f12959b23,36195,1731663451412 2024-11-15T09:37:31,737 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C36195%2C1731663451412, suffix=, logDir=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/WALs/791f12959b23,36195,1731663451412, archiveDir=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/oldWALs, maxLogs=10 2024-11-15T09:37:31,737 INFO [master/791f12959b23:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C36195%2C1731663451412.1731663451737 2024-11-15T09:37:31,742 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/WALs/791f12959b23,36195,1731663451412/791f12959b23%2C36195%2C1731663451412.1731663451737 2024-11-15T09:37:31,752 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46803:46803),(127.0.0.1/127.0.0.1:45865:45865)] 2024-11-15T09:37:31,760 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T09:37:31,760 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:37:31,761 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:37:31,761 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:37:31,762 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:37:31,764 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T09:37:31,764 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:37:31,764 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:37:31,764 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:37:31,765 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T09:37:31,765 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:37:31,766 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:37:31,766 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:37:31,767 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T09:37:31,767 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:37:31,768 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:37:31,768 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:37:31,769 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T09:37:31,769 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:37:31,770 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:37:31,770 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:37:31,770 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:37:31,771 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:37:31,772 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:37:31,772 DEBUG [master/791f12959b23:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:37:31,773 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T09:37:31,774 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:37:31,776 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T09:37:31,777 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=879834, jitterRate=0.11876782774925232}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T09:37:31,777 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731663451761Initializing all the Stores at 1731663451762 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663451762Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663451762Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663451762Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663451762Cleaning up temporary data from old regions at 1731663451772 (+10 ms)Region opened successfully at 1731663451777 (+5 ms) 2024-11-15T09:37:31,778 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T09:37:31,781 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5799013, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=791f12959b23/172.17.0.2:0 2024-11-15T09:37:31,782 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T09:37:31,782 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T09:37:31,782 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T09:37:31,782 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T09:37:31,783 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T09:37:31,783 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T09:37:31,783 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T09:37:31,786 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T09:37:31,787 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T09:37:31,793 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T09:37:31,794 INFO [master/791f12959b23:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T09:37:31,795 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T09:37:31,804 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T09:37:31,805 INFO [master/791f12959b23:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T09:37:31,806 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T09:37:31,814 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T09:37:31,816 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T09:37:31,825 DEBUG 
[master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T09:37:31,827 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T09:37:31,835 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T09:37:31,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T09:37:31,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:37:31,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T09:37:31,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:37:31,847 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=791f12959b23,36195,1731663451412, sessionid=0x1013ddb50ad0000, setting cluster-up flag (Was=false) 2024-11-15T09:37:31,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:37:31,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:37:31,899 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T09:37:31,900 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=791f12959b23,36195,1731663451412 2024-11-15T09:37:31,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:37:31,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:37:31,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 
java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:31,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:31,951 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T09:37:31,952 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=791f12959b23,36195,1731663451412 2024-11-15T09:37:31,954 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T09:37:31,955 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T09:37:31,956 INFO [master/791f12959b23:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T09:37:31,956 INFO [master/791f12959b23:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
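The StochasticLoadBalancer record above reports the knobs it loaded (maxSteps, runMaxSteps, stepsPerRegion, maxRunningTime and the cost-function list). A hedged sketch of setting those values programmatically follows; the hbase.master.balancer.stochastic.* keys are my recollection of the balancer's configuration names and should be verified against the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static Configuration tuned() {
            Configuration conf = HBaseConfiguration.create();
            // Values mirror the ones reported in the log record above.
            conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
            conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
            return conf;
        }
    }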
2024-11-15T09:37:31,956 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 791f12959b23,36195,1731663451412 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T09:37:31,957 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:37:31,957 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:37:31,957 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:37:31,957 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:37:31,957 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/791f12959b23:0, corePoolSize=10, maxPoolSize=10 2024-11-15T09:37:31,957 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:37:31,957 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/791f12959b23:0, corePoolSize=2, maxPoolSize=2 2024-11-15T09:37:31,958 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:37:31,959 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731663481959 2024-11-15T09:37:31,959 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T09:37:31,959 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T09:37:31,959 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T09:37:31,959 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T09:37:31,959 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T09:37:31,959 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:37:31,959 INFO 
[master/791f12959b23:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T09:37:31,959 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T09:37:31,959 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T09:37:31,960 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T09:37:31,960 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T09:37:31,960 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T09:37:31,960 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T09:37:31,960 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T09:37:31,960 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663451960,5,FailOnTimeoutGroup] 2024-11-15T09:37:31,961 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:37:31,961 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663451960,5,FailOnTimeoutGroup] 2024-11-15T09:37:31,961 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T09:37:31,961 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T09:37:31,961 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T09:37:31,961 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
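Two things in the chore setup above lend themselves to a short illustration: the periodic-chore pattern itself (a named task rerun on a fixed period, e.g. LogsCleaner at period=600000 ms) and the hbase.regions.recovery.store.file.ref.count threshold that the master says must be > 0 to enable reopening regions with very high storeFileRefCount. The sketch below uses a plain ScheduledExecutorService rather than HBase's ChoreService, and the threshold value 3 is an arbitrary example.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ChoreAndThresholdSketch {
        public static void main(String[] args) throws InterruptedException {
            // Periodic "chore" pattern: a named task rerun on a fixed period,
            // analogous to the LogsCleaner/HFileCleaner chores above.
            ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();
            chorePool.scheduleAtFixedRate(
                    () -> System.out.println("cleaner pass"),
                    0, 600_000, TimeUnit.MILLISECONDS);

            // Per the master log, region reopening stays disabled until this
            // threshold is set > 0; 3 here is just an example value.
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);

            TimeUnit.SECONDS.sleep(1);
            chorePool.shutdownNow();
        }
    }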
2024-11-15T09:37:31,961 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T09:37:31,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741831_1007 (size=1321) 2024-11-15T09:37:31,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741831_1007 (size=1321) 2024-11-15T09:37:31,970 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T09:37:31,970 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6 2024-11-15T09:37:31,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741832_1008 (size=32) 2024-11-15T09:37:31,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741832_1008 (size=32) 2024-11-15T09:37:31,977 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:37:31,978 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T09:37:31,979 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T09:37:31,979 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:37:31,980 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:37:31,980 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T09:37:31,981 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T09:37:31,981 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:37:31,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:37:31,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T09:37:31,983 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T09:37:31,983 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:37:31,984 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:37:31,984 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T09:37:31,985 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T09:37:31,985 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:37:31,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:37:31,986 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T09:37:31,986 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740 2024-11-15T09:37:31,987 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740 2024-11-15T09:37:31,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T09:37:31,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T09:37:31,988 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T09:37:31,989 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T09:37:31,991 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T09:37:31,992 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=794841, jitterRate=0.010693460702896118}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T09:37:31,992 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731663451977Initializing all the Stores at 1731663451978 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663451978Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663451978Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663451978Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE 
=> 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663451978Cleaning up temporary data from old regions at 1731663451988 (+10 ms)Region opened successfully at 1731663451992 (+4 ms) 2024-11-15T09:37:31,992 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T09:37:31,992 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T09:37:31,992 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T09:37:31,992 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T09:37:31,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T09:37:31,993 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T09:37:31,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731663451992Disabling compacts and flushes for region at 1731663451992Disabling writes for close at 1731663451992Writing region close event to WAL at 1731663451993 (+1 ms)Closed at 1731663451993 2024-11-15T09:37:31,994 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:37:31,994 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T09:37:31,994 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T09:37:31,996 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T09:37:31,997 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T09:37:32,021 INFO [RS:0;791f12959b23:37281 {}] regionserver.HRegionServer(746): ClusterId : 7ab5f1e1-2290-4247-ac1d-8ad579a60f7c 2024-11-15T09:37:32,021 DEBUG [RS:0;791f12959b23:37281 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T09:37:32,026 DEBUG [RS:0;791f12959b23:37281 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T09:37:32,026 DEBUG [RS:0;791f12959b23:37281 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T09:37:32,036 DEBUG [RS:0;791f12959b23:37281 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T09:37:32,037 DEBUG [RS:0;791f12959b23:37281 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d1b102d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=791f12959b23/172.17.0.2:0 2024-11-15T09:37:32,051 DEBUG [RS:0;791f12959b23:37281 {}] 
regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;791f12959b23:37281 2024-11-15T09:37:32,052 INFO [RS:0;791f12959b23:37281 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T09:37:32,052 INFO [RS:0;791f12959b23:37281 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T09:37:32,052 DEBUG [RS:0;791f12959b23:37281 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-15T09:37:32,052 INFO [RS:0;791f12959b23:37281 {}] regionserver.HRegionServer(2659): reportForDuty to master=791f12959b23,36195,1731663451412 with port=37281, startcode=1731663451600 2024-11-15T09:37:32,052 DEBUG [RS:0;791f12959b23:37281 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T09:37:32,054 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55907, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T09:37:32,055 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36195 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 791f12959b23,37281,1731663451600 2024-11-15T09:37:32,055 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36195 {}] master.ServerManager(517): Registering regionserver=791f12959b23,37281,1731663451600 2024-11-15T09:37:32,056 DEBUG [RS:0;791f12959b23:37281 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6 2024-11-15T09:37:32,056 DEBUG [RS:0;791f12959b23:37281 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42325 2024-11-15T09:37:32,056 DEBUG [RS:0;791f12959b23:37281 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T09:37:32,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T09:37:32,068 DEBUG [RS:0;791f12959b23:37281 {}] zookeeper.ZKUtil(111): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/791f12959b23,37281,1731663451600 2024-11-15T09:37:32,068 WARN [RS:0;791f12959b23:37281 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
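The registration visible above (reportForDuty to the master, then an ephemeral znode under /hbase/rs picked up by RegionServerTracker) can be confirmed from a client once the cluster is up. A minimal sketch using the standard Admin API; the quorum/port values are copied from this test's log and the class name is only illustrative:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListLiveRegionServers {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Values mirroring the test log (quorum=127.0.0.1:61704); adjust to your cluster.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "61704");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          // Each live server here corresponds to an ephemeral znode under /hbase/rs.
          for (ServerName sn : metrics.getLiveServerMetrics().keySet()) {
            System.out.println("live regionserver: " + sn);
          }
        }
      }
    }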
2024-11-15T09:37:32,068 INFO [RS:0;791f12959b23:37281 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:37:32,068 DEBUG [RS:0;791f12959b23:37281 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/WALs/791f12959b23,37281,1731663451600 2024-11-15T09:37:32,068 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [791f12959b23,37281,1731663451600] 2024-11-15T09:37:32,071 INFO [RS:0;791f12959b23:37281 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T09:37:32,073 INFO [RS:0;791f12959b23:37281 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T09:37:32,073 INFO [RS:0;791f12959b23:37281 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T09:37:32,073 INFO [RS:0;791f12959b23:37281 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:37:32,073 INFO [RS:0;791f12959b23:37281 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T09:37:32,074 INFO [RS:0;791f12959b23:37281 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T09:37:32,074 INFO [RS:0;791f12959b23:37281 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T09:37:32,074 DEBUG [RS:0;791f12959b23:37281 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:37:32,074 DEBUG [RS:0;791f12959b23:37281 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:37:32,074 DEBUG [RS:0;791f12959b23:37281 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:37:32,074 DEBUG [RS:0;791f12959b23:37281 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:37:32,075 DEBUG [RS:0;791f12959b23:37281 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:37:32,075 DEBUG [RS:0;791f12959b23:37281 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/791f12959b23:0, corePoolSize=2, maxPoolSize=2 2024-11-15T09:37:32,075 DEBUG [RS:0;791f12959b23:37281 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:37:32,075 DEBUG [RS:0;791f12959b23:37281 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:37:32,075 DEBUG [RS:0;791f12959b23:37281 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/791f12959b23:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T09:37:32,075 DEBUG [RS:0;791f12959b23:37281 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:37:32,075 DEBUG [RS:0;791f12959b23:37281 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:37:32,075 DEBUG [RS:0;791f12959b23:37281 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:37:32,075 DEBUG [RS:0;791f12959b23:37281 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/791f12959b23:0, corePoolSize=3, maxPoolSize=3 2024-11-15T09:37:32,075 DEBUG [RS:0;791f12959b23:37281 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0, corePoolSize=3, maxPoolSize=3 2024-11-15T09:37:32,076 INFO [RS:0;791f12959b23:37281 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T09:37:32,076 INFO [RS:0;791f12959b23:37281 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T09:37:32,076 INFO [RS:0;791f12959b23:37281 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:37:32,076 INFO [RS:0;791f12959b23:37281 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T09:37:32,076 INFO [RS:0;791f12959b23:37281 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T09:37:32,076 INFO [RS:0;791f12959b23:37281 {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,37281,1731663451600-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T09:37:32,092 INFO [RS:0;791f12959b23:37281 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T09:37:32,092 INFO [RS:0;791f12959b23:37281 {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,37281,1731663451600-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:37:32,092 INFO [RS:0;791f12959b23:37281 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:37:32,093 INFO [RS:0;791f12959b23:37281 {}] regionserver.Replication(171): 791f12959b23,37281,1731663451600 started 2024-11-15T09:37:32,106 INFO [RS:0;791f12959b23:37281 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
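The memstore limits (880 M / 836 M) and the compaction throughput bounds (100 MB/s / 50 MB/s) reported above are derived from configuration. A sketch of the keys believed to drive them; the values are illustrative, not taken from the test's actual site file:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndCompactionThroughputConfig {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Global memstore limit as a fraction of the regionserver heap; the low-water
        // mark is a fraction of that limit (836 M is ~0.95 of 880 M in the log).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Bounds used by PressureAwareCompactionThroughputController.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        return conf;
      }
    }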
2024-11-15T09:37:32,107 INFO [RS:0;791f12959b23:37281 {}] regionserver.HRegionServer(1482): Serving as 791f12959b23,37281,1731663451600, RpcServer on 791f12959b23/172.17.0.2:37281, sessionid=0x1013ddb50ad0001 2024-11-15T09:37:32,107 DEBUG [RS:0;791f12959b23:37281 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T09:37:32,107 DEBUG [RS:0;791f12959b23:37281 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 791f12959b23,37281,1731663451600 2024-11-15T09:37:32,107 DEBUG [RS:0;791f12959b23:37281 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '791f12959b23,37281,1731663451600' 2024-11-15T09:37:32,107 DEBUG [RS:0;791f12959b23:37281 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T09:37:32,107 DEBUG [RS:0;791f12959b23:37281 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T09:37:32,108 DEBUG [RS:0;791f12959b23:37281 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T09:37:32,108 DEBUG [RS:0;791f12959b23:37281 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T09:37:32,108 DEBUG [RS:0;791f12959b23:37281 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 791f12959b23,37281,1731663451600 2024-11-15T09:37:32,108 DEBUG [RS:0;791f12959b23:37281 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '791f12959b23,37281,1731663451600' 2024-11-15T09:37:32,108 DEBUG [RS:0;791f12959b23:37281 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T09:37:32,108 DEBUG [RS:0;791f12959b23:37281 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T09:37:32,109 DEBUG [RS:0;791f12959b23:37281 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T09:37:32,109 INFO [RS:0;791f12959b23:37281 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T09:37:32,109 INFO [RS:0;791f12959b23:37281 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T09:37:32,147 WARN [791f12959b23:36195 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
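Both quota managers report themselves disabled above. A minimal sketch of the switch that would enable them, assuming the standard hbase.quota.enabled key and that it is set before the master and region servers start:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaConfig {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // RegionServerRpcQuotaManager and RegionServerSpaceQuotaManager log
        // "Quota support disabled" unless this is turned on cluster-wide.
        conf.setBoolean("hbase.quota.enabled", true);
        return conf;
      }
    }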
2024-11-15T09:37:32,212 INFO [RS:0;791f12959b23:37281 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C37281%2C1731663451600, suffix=, logDir=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/WALs/791f12959b23,37281,1731663451600, archiveDir=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/oldWALs, maxLogs=32 2024-11-15T09:37:32,213 INFO [RS:0;791f12959b23:37281 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C37281%2C1731663451600.1731663452212 2024-11-15T09:37:32,219 INFO [RS:0;791f12959b23:37281 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/WALs/791f12959b23,37281,1731663451600/791f12959b23%2C37281%2C1731663451600.1731663452212 2024-11-15T09:37:32,220 DEBUG [RS:0;791f12959b23:37281 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46803:46803),(127.0.0.1/127.0.0.1:45865:45865)] 2024-11-15T09:37:32,397 DEBUG [791f12959b23:36195 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T09:37:32,398 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=791f12959b23,37281,1731663451600 2024-11-15T09:37:32,400 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 791f12959b23,37281,1731663451600, state=OPENING 2024-11-15T09:37:32,467 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T09:37:32,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:37:32,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:37:32,479 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T09:37:32,479 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:37:32,479 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:37:32,479 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=791f12959b23,37281,1731663451600}] 2024-11-15T09:37:32,633 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T09:37:32,635 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53333, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T09:37:32,639 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T09:37:32,639 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:37:32,642 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C37281%2C1731663451600.meta, suffix=.meta, logDir=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/WALs/791f12959b23,37281,1731663451600, archiveDir=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/oldWALs, maxLogs=32 2024-11-15T09:37:32,642 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C37281%2C1731663451600.meta.1731663452642.meta 2024-11-15T09:37:32,648 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/WALs/791f12959b23,37281,1731663451600/791f12959b23%2C37281%2C1731663451600.meta.1731663452642.meta 2024-11-15T09:37:32,650 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45865:45865),(127.0.0.1/127.0.0.1:46803:46803)] 2024-11-15T09:37:32,651 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T09:37:32,651 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T09:37:32,651 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T09:37:32,652 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
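The WAL lines above (FSHLogProvider, blocksize=256 MB, rollsize=128 MB, maxLogs=32) follow from a handful of settings. A sketch of those keys; the values mirror the log but are shown here only as an example:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderConfig {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // "filesystem" selects FSHLogProvider, the provider instantiated in the log;
        // "asyncfs" would select AsyncFSWALProvider instead.
        conf.set("hbase.wal.provider", "filesystem");
        // WAL block size; if unset, HBase derives it from the filesystem's default block size.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // rollsize = blocksize * multiplier (256 MB * 0.5 = 128 MB above).
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Upper bound on un-archived WAL files before flushes are forced (maxLogs=32 above).
        conf.setInt("hbase.regionserver.maxlogs", 32);
        return conf;
      }
    }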
2024-11-15T09:37:32,652 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T09:37:32,652 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:37:32,652 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T09:37:32,652 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T09:37:32,654 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T09:37:32,654 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T09:37:32,655 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:37:32,655 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:37:32,655 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T09:37:32,656 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T09:37:32,656 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:37:32,657 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:37:32,657 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T09:37:32,658 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T09:37:32,658 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:37:32,658 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:37:32,659 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T09:37:32,659 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T09:37:32,660 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:37:32,660 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
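The CompactionConfiguration entries above print the effective compaction tuning for each meta column family. A sketch of the corresponding configuration keys, with values mirroring the logged ones (minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2, off-peak ratio 5.0, weekly major compactions with 0.5 jitter):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);             // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);            // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hregion.majorcompaction", 604800000L); // 7 days in ms
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        return conf;
      }
    }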
2024-11-15T09:37:32,660 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T09:37:32,661 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740 2024-11-15T09:37:32,662 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740 2024-11-15T09:37:32,664 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T09:37:32,664 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T09:37:32,664 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T09:37:32,666 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T09:37:32,667 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=828208, jitterRate=0.05312153697013855}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T09:37:32,667 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T09:37:32,667 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731663452652Writing region info on filesystem at 1731663452652Initializing all the Stores at 1731663452653 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663452653Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663452653Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663452653Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663452653Cleaning up temporary data from old regions at 1731663452664 (+11 ms)Running coprocessor post-open hooks at 1731663452667 (+3 ms)Region opened successfully at 1731663452667 2024-11-15T09:37:32,669 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731663452633 2024-11-15T09:37:32,671 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T09:37:32,671 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T09:37:32,672 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=791f12959b23,37281,1731663451600 2024-11-15T09:37:32,673 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 791f12959b23,37281,1731663451600, state=OPEN 2024-11-15T09:37:32,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T09:37:32,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T09:37:32,741 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=791f12959b23,37281,1731663451600 2024-11-15T09:37:32,741 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:37:32,741 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:37:32,745 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T09:37:32,745 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=791f12959b23,37281,1731663451600 in 262 msec 2024-11-15T09:37:32,748 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T09:37:32,748 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 751 msec 2024-11-15T09:37:32,749 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:37:32,749 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T09:37:32,751 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T09:37:32,751 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=791f12959b23,37281,1731663451600, seqNum=-1] 2024-11-15T09:37:32,752 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T09:37:32,753 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60919, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T09:37:32,760 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 803 msec 2024-11-15T09:37:32,760 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731663452760, completionTime=-1 2024-11-15T09:37:32,760 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T09:37:32,760 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T09:37:32,762 INFO [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T09:37:32,762 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731663512762 2024-11-15T09:37:32,762 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731663572762 2024-11-15T09:37:32,762 INFO [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-15T09:37:32,762 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,36195,1731663451412-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:37:32,763 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,36195,1731663451412-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:37:32,763 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,36195,1731663451412-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:37:32,763 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-791f12959b23:36195, period=300000, unit=MILLISECONDS is enabled. 
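InitMetaProcedure above creates the 'default' and 'hbase' namespaces as part of master startup. The same operation is available to clients through the Admin API; a small sketch, where the namespace name app_ns is purely hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;

    public class NamespaceExample {
      // Client-side counterpart of "Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces".
      public static void createAppNamespace(Admin admin) throws IOException {
        admin.createNamespace(NamespaceDescriptor.create("app_ns").build());
        for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
          System.out.println("namespace: " + ns.getName());
        }
      }
    }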
2024-11-15T09:37:32,763 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T09:37:32,763 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T09:37:32,765 DEBUG [master/791f12959b23:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T09:37:32,767 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.110sec 2024-11-15T09:37:32,767 INFO [master/791f12959b23:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T09:37:32,767 INFO [master/791f12959b23:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T09:37:32,767 INFO [master/791f12959b23:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T09:37:32,767 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T09:37:32,767 INFO [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T09:37:32,767 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,36195,1731663451412-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T09:37:32,767 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,36195,1731663451412-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T09:37:32,770 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T09:37:32,770 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T09:37:32,770 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,36195,1731663451412-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
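With master initialization complete, the test client that connects next (see the ClusterIdFetcher and "fetched meta region location" lines below) resolves hbase:meta to its hosting region server. A sketch of the equivalent lookup through the public client API, assuming an hbase-site.xml on the classpath that points at this cluster:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLocationLookup {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Resolves hbase:meta,,1.1588230740 to its hosting region server,
          // as the "fetched meta region location" debug lines do.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true);
          System.out.println("hbase:meta is on " + loc.getServerName());
        }
      }
    }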
2024-11-15T09:37:32,821 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74cca69f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:37:32,821 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 791f12959b23,36195,-1 for getting cluster id 2024-11-15T09:37:32,821 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T09:37:32,823 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7ab5f1e1-2290-4247-ac1d-8ad579a60f7c' 2024-11-15T09:37:32,824 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T09:37:32,824 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7ab5f1e1-2290-4247-ac1d-8ad579a60f7c" 2024-11-15T09:37:32,824 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e66401f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:37:32,824 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [791f12959b23,36195,-1] 2024-11-15T09:37:32,825 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T09:37:32,825 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:37:32,826 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39092, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T09:37:32,827 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3764e08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:37:32,828 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T09:37:32,829 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=791f12959b23,37281,1731663451600, seqNum=-1] 2024-11-15T09:37:32,829 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T09:37:32,831 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43740, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T09:37:32,833 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=791f12959b23,36195,1731663451412 2024-11-15T09:37:32,833 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:37:32,836 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T09:37:32,836 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T09:37:32,837 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 791f12959b23,36195,1731663451412 2024-11-15T09:37:32,837 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@13640fbc 2024-11-15T09:37:32,838 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T09:37:32,839 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39102, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T09:37:32,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T09:37:32,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-15T09:37:32,840 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T09:37:32,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T09:37:32,842 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T09:37:32,842 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:37:32,843 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-15T09:37:32,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T09:37:32,844 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T09:37:32,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741835_1011 (size=405) 2024-11-15T09:37:32,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741835_1011 (size=405) 2024-11-15T09:37:32,852 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2635425bd0e1adb1194a8a56044a84fa, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6 2024-11-15T09:37:32,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741836_1012 (size=88) 2024-11-15T09:37:32,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741836_1012 (size=88) 2024-11-15T09:37:32,860 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:37:32,860 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 2635425bd0e1adb1194a8a56044a84fa, disabling compactions & flushes 2024-11-15T09:37:32,860 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa. 2024-11-15T09:37:32,860 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa. 2024-11-15T09:37:32,860 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa. after waiting 0 ms 2024-11-15T09:37:32,860 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa. 
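The create-table request above uses deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) values, which is why TableDescriptorChecker logs the two warnings. A sketch of building the same kind of descriptor through the client API; note that on a cluster with strict table sanity checks such limits may be rejected rather than merely warned about:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void create(Admin admin) throws IOException {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)
                .build())
            // Deliberately tiny limits, as in the test, which trigger the
            // MAX_FILESIZE / MEMSTORE_FLUSHSIZE warnings from TableDescriptorChecker.
            .setMaxFileSize(786432L)
            .setMemStoreFlushSize(8192L)
            .build();
        admin.createTable(td);
      }
    }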
2024-11-15T09:37:32,860 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa. 2024-11-15T09:37:32,860 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2635425bd0e1adb1194a8a56044a84fa: Waiting for close lock at 1731663452860Disabling compacts and flushes for region at 1731663452860Disabling writes for close at 1731663452860Writing region close event to WAL at 1731663452860Closed at 1731663452860 2024-11-15T09:37:32,861 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T09:37:32,862 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731663452862"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731663452862"}]},"ts":"1731663452862"} 2024-11-15T09:37:32,864 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-15T09:37:32,866 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T09:37:32,866 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731663452866"}]},"ts":"1731663452866"} 2024-11-15T09:37:32,869 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-15T09:37:32,869 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=2635425bd0e1adb1194a8a56044a84fa, ASSIGN}] 2024-11-15T09:37:32,870 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=2635425bd0e1adb1194a8a56044a84fa, ASSIGN 2024-11-15T09:37:32,871 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=2635425bd0e1adb1194a8a56044a84fa, ASSIGN; state=OFFLINE, location=791f12959b23,37281,1731663451600; forceNewPlan=false, retain=false 2024-11-15T09:37:32,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: 
null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-15T09:37:32,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-15T09:37:33,022 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2635425bd0e1adb1194a8a56044a84fa, regionState=OPENING, regionLocation=791f12959b23,37281,1731663451600 2024-11-15T09:37:33,025 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=2635425bd0e1adb1194a8a56044a84fa, ASSIGN because future has completed 2024-11-15T09:37:33,026 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2635425bd0e1adb1194a8a56044a84fa, server=791f12959b23,37281,1731663451600}] 2024-11-15T09:37:33,184 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa.
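The two "Failed invocation ... Filesystem closed" warnings above come from a Close-WAL-Writer thread still probing lease recovery on WALs of an earlier minicluster whose DFSClient has already been shut down. A simplified sketch of the underlying HDFS calls (recoverLease, isFileClosed) that RecoverLeaseFSUtils drives reflectively; this is not the retry loop HBase actually uses:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseProbe {
      // Ask the NameNode to recover the lease on an old WAL and check isFileClosed().
      // In the log this fails with "Filesystem closed" because the probing client
      // belongs to a minicluster that has already been torn down.
      public static boolean recover(Configuration conf, Path wal) throws IOException {
        FileSystem fs = wal.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          return true; // a local filesystem has no leases to recover
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        boolean recovered = dfs.recoverLease(wal);
        return recovered || dfs.isFileClosed(wal);
      }
    }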
2024-11-15T09:37:33,184 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 2635425bd0e1adb1194a8a56044a84fa, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa.', STARTKEY => '', ENDKEY => ''} 2024-11-15T09:37:33,185 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2635425bd0e1adb1194a8a56044a84fa 2024-11-15T09:37:33,185 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:37:33,185 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 2635425bd0e1adb1194a8a56044a84fa 2024-11-15T09:37:33,185 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 2635425bd0e1adb1194a8a56044a84fa 2024-11-15T09:37:33,187 INFO [StoreOpener-2635425bd0e1adb1194a8a56044a84fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 2635425bd0e1adb1194a8a56044a84fa 2024-11-15T09:37:33,189 INFO [StoreOpener-2635425bd0e1adb1194a8a56044a84fa-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2635425bd0e1adb1194a8a56044a84fa columnFamilyName info 2024-11-15T09:37:33,189 DEBUG [StoreOpener-2635425bd0e1adb1194a8a56044a84fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:37:33,190 INFO [StoreOpener-2635425bd0e1adb1194a8a56044a84fa-1 {}] regionserver.HStore(327): Store=2635425bd0e1adb1194a8a56044a84fa/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:37:33,190 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 2635425bd0e1adb1194a8a56044a84fa 2024-11-15T09:37:33,191 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa 2024-11-15T09:37:33,191 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa 2024-11-15T09:37:33,192 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 2635425bd0e1adb1194a8a56044a84fa 2024-11-15T09:37:33,192 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 2635425bd0e1adb1194a8a56044a84fa 2024-11-15T09:37:33,195 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 2635425bd0e1adb1194a8a56044a84fa 2024-11-15T09:37:33,197 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T09:37:33,198 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 2635425bd0e1adb1194a8a56044a84fa; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=861723, jitterRate=0.09573790431022644}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T09:37:33,198 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2635425bd0e1adb1194a8a56044a84fa 2024-11-15T09:37:33,199 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 2635425bd0e1adb1194a8a56044a84fa: Running coprocessor pre-open hook at 1731663453185Writing region info on filesystem at 1731663453185Initializing all the Stores at 1731663453186 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663453186Cleaning up temporary data from old regions at 1731663453192 (+6 ms)Running coprocessor post-open hooks at 1731663453198 (+6 ms)Region opened successfully at 1731663453199 (+1 ms) 2024-11-15T09:37:33,201 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa., pid=6, masterSystemTime=1731663453179 2024-11-15T09:37:33,203 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa. 2024-11-15T09:37:33,203 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa. 2024-11-15T09:37:33,205 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2635425bd0e1adb1194a8a56044a84fa, regionState=OPEN, openSeqNum=2, regionLocation=791f12959b23,37281,1731663451600 2024-11-15T09:37:33,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2635425bd0e1adb1194a8a56044a84fa, server=791f12959b23,37281,1731663451600 because future has completed 2024-11-15T09:37:33,212 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T09:37:33,212 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 2635425bd0e1adb1194a8a56044a84fa, server=791f12959b23,37281,1731663451600 in 183 msec 2024-11-15T09:37:33,215 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T09:37:33,215 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=2635425bd0e1adb1194a8a56044a84fa, ASSIGN in 343 msec 2024-11-15T09:37:33,216 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T09:37:33,216 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731663453216"}]},"ts":"1731663453216"} 2024-11-15T09:37:33,218 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-15T09:37:33,219 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T09:37:33,221 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 379 msec 2024-11-15T09:37:33,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:33,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:34,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:34,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:35,730 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T09:37:35,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:35,732 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:35,732 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:35,732 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:35,733 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:35,733 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:35,758 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:35,758 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:35,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:35,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:35,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:35,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:35,763 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:35,764 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:35,764 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:35,767 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:37:35,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:35,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:36,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:36,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:37,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:37,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:38,071 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T09:37:38,072 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-15T09:37:38,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:38,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:39,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:39,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:40,227 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T09:37:40,227 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-15T09:37:40,228 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T09:37:40,228 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-15T09:37:40,228 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T09:37:40,228 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-15T09:37:40,229 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T09:37:40,229 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-15T09:37:40,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:40,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:41,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:41,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:42,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T09:37:42,898 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-15T09:37:42,898 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-15T09:37:42,902 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T09:37:42,903 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa. 
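Note on the client-facing sequence above: the CREATE operation for TestLogRolling-testCompactionRecordDoesntBlockRolling is reported complete, the single region is located, and, as the next entries show, the master then receives a flush request that becomes FlushTableProcedure pid=7. On the client side a create-then-flush sequence like this is normally driven through the HBase Admin API, roughly as in the hedged sketch below; this is a generic illustration under that assumption, not the actual test code, and the configuration used to reach the mini cluster is a placeholder.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateThenFlushSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder configuration; a real caller would point this at the test cluster.
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // CREATE: produces the CreateTableProcedure / ASSIGN chain seen earlier in the log.
            admin.createTable(TableDescriptorBuilder.newBuilder(table)
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                    .build());
            // FLUSH: corresponds to the master-side flush request and FlushTableProcedure (pid=7).
            admin.flush(table);
        }
    }
}
```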
2024-11-15T09:37:42,907 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa., hostname=791f12959b23,37281,1731663451600, seqNum=2] 2024-11-15T09:37:42,915 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T09:37:42,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T09:37:42,922 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-15T09:37:42,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-15T09:37:42,923 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-15T09:37:42,925 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-15T09:37:42,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:42,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:37:43,084 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37281 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-15T09:37:43,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa. 2024-11-15T09:37:43,085 INFO [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 2635425bd0e1adb1194a8a56044a84fa 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-15T09:37:43,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/.tmp/info/b5d2e5ccd1804c1180132e001a6fa0d4 is 1080, key is row0001/info:/1731663462908/Put/seqid=0 2024-11-15T09:37:43,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741837_1013 (size=6033) 2024-11-15T09:37:43,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741837_1013 (size=6033) 2024-11-15T09:37:43,106 INFO [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/.tmp/info/b5d2e5ccd1804c1180132e001a6fa0d4 2024-11-15T09:37:43,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/.tmp/info/b5d2e5ccd1804c1180132e001a6fa0d4 as hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/b5d2e5ccd1804c1180132e001a6fa0d4 2024-11-15T09:37:43,119 INFO [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/b5d2e5ccd1804c1180132e001a6fa0d4, entries=1, sequenceid=5, filesize=5.9 K 2024-11-15T09:37:43,120 INFO [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 2635425bd0e1adb1194a8a56044a84fa in 35ms, sequenceid=5, compaction requested=false 2024-11-15T09:37:43,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush 
status journal for 2635425bd0e1adb1194a8a56044a84fa: 2024-11-15T09:37:43,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa. 2024-11-15T09:37:43,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-15T09:37:43,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-15T09:37:43,128 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-15T09:37:43,128 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 200 msec 2024-11-15T09:37:43,130 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 213 msec 2024-11-15T09:37:43,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:43,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:44,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:44,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:45,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:45,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:46,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:46,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:47,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:47,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:48,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:48,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:49,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:49,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:50,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:50,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:51,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:51,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:52,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-15T09:37:52,947 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-15T09:37:52,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:52,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:37:52,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 after 68056ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:37:52,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta after 68042ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T09:37:52,952 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T09:37:52,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T09:37:52,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-15T09:37:52,956 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-15T09:37:52,957 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-15T09:37:52,957 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-15T09:37:53,111 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37281 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-15T09:37:53,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa. 
2024-11-15T09:37:53,112 INFO [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 2635425bd0e1adb1194a8a56044a84fa 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-15T09:37:53,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/.tmp/info/21ebc45527304588aae6aa882309f40e is 1080, key is row0002/info:/1731663472949/Put/seqid=0 2024-11-15T09:37:53,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741838_1014 (size=6033) 2024-11-15T09:37:53,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741838_1014 (size=6033) 2024-11-15T09:37:53,125 INFO [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/.tmp/info/21ebc45527304588aae6aa882309f40e 2024-11-15T09:37:53,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/.tmp/info/21ebc45527304588aae6aa882309f40e as hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/21ebc45527304588aae6aa882309f40e 2024-11-15T09:37:53,141 INFO [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/21ebc45527304588aae6aa882309f40e, entries=1, sequenceid=9, filesize=5.9 K 2024-11-15T09:37:53,142 INFO [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 2635425bd0e1adb1194a8a56044a84fa in 31ms, sequenceid=9, compaction requested=false 2024-11-15T09:37:53,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 2635425bd0e1adb1194a8a56044a84fa: 2024-11-15T09:37:53,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa. 
2024-11-15T09:37:53,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-15T09:37:53,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-15T09:37:53,146 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-15T09:37:53,146 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 187 msec 2024-11-15T09:37:53,148 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 195 msec 2024-11-15T09:37:53,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:37:53,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:54,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:54,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:37:55,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:55,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:56,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:37:56,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:57,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:57,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:37:58,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:58,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:37:59,956 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:37:59,956 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:00,957 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:00,957 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:38:01,391 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T09:38:01,957 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:01,957 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:02,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:02,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:38:02,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-15T09:38:02,968 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-15T09:38:02,972 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C37281%2C1731663451600.1731663482972 2024-11-15T09:38:02,978 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:02,978 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:02,978 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:02,979 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:02,979 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:02,979 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/WALs/791f12959b23,37281,1731663451600/791f12959b23%2C37281%2C1731663451600.1731663452212 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/WALs/791f12959b23,37281,1731663451600/791f12959b23%2C37281%2C1731663451600.1731663482972 2024-11-15T09:38:02,980 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46803:46803),(127.0.0.1/127.0.0.1:45865:45865)] 2024-11-15T09:38:02,980 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/WALs/791f12959b23,37281,1731663451600/791f12959b23%2C37281%2C1731663451600.1731663452212 is not closed yet, will try archiving it next time 2024-11-15T09:38:02,981 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T09:38:02,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741833_1009 (size=5546) 2024-11-15T09:38:02,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741833_1009 (size=5546) 2024-11-15T09:38:02,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T09:38:02,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-15T09:38:02,984 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-15T09:38:02,986 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-11-15T09:38:02,986 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-15T09:38:03,139 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37281 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-15T09:38:03,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa.
2024-11-15T09:38:03,140 INFO [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 2635425bd0e1adb1194a8a56044a84fa 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-15T09:38:03,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/.tmp/info/4c7fa978f4a14c5a86e21eeaa8ec6c6e is 1080, key is row0003/info:/1731663482970/Put/seqid=0
2024-11-15T09:38:03,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741840_1016 (size=6033)
2024-11-15T09:38:03,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741840_1016 (size=6033)
2024-11-15T09:38:03,151 INFO [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/.tmp/info/4c7fa978f4a14c5a86e21eeaa8ec6c6e
2024-11-15T09:38:03,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/.tmp/info/4c7fa978f4a14c5a86e21eeaa8ec6c6e as hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/4c7fa978f4a14c5a86e21eeaa8ec6c6e
2024-11-15T09:38:03,165 INFO [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/4c7fa978f4a14c5a86e21eeaa8ec6c6e, entries=1, sequenceid=13, filesize=5.9 K
2024-11-15T09:38:03,166 INFO [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 2635425bd0e1adb1194a8a56044a84fa in 26ms, sequenceid=13, compaction requested=true
2024-11-15T09:38:03,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 2635425bd0e1adb1194a8a56044a84fa:
2024-11-15T09:38:03,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa.
2024-11-15T09:38:03,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-15T09:38:03,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-15T09:38:03,171 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-15T09:38:03,171 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 182 msec
2024-11-15T09:38:03,173 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 191 msec
2024-11-15T09:38:03,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:03,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:04,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:04,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:05,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:05,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:06,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:06,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:07,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:07,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:08,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:08,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:09,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:09,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:10,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:10,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:11,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:11,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:12,801 INFO [master/791f12959b23:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-15T09:38:12,801 INFO [master/791f12959b23:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-15T09:38:12,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:12,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-15T09:38:12,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-15T09:38:12,998 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-15T09:38:12,998 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-15T09:38:13,000 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-15T09:38:13,000 DEBUG [Time-limited test {}] regionserver.HStore(1541): 2635425bd0e1adb1194a8a56044a84fa/info is initiating minor compaction (all files)
2024-11-15T09:38:13,000 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-15T09:38:13,000 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-15T09:38:13,000 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 2635425bd0e1adb1194a8a56044a84fa/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa.
2024-11-15T09:38:13,001 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/b5d2e5ccd1804c1180132e001a6fa0d4, hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/21ebc45527304588aae6aa882309f40e, hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/4c7fa978f4a14c5a86e21eeaa8ec6c6e] into tmpdir=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/.tmp, totalSize=17.7 K
2024-11-15T09:38:13,001 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting b5d2e5ccd1804c1180132e001a6fa0d4, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731663462908
2024-11-15T09:38:13,002 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 21ebc45527304588aae6aa882309f40e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731663472949
2024-11-15T09:38:13,002 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 4c7fa978f4a14c5a86e21eeaa8ec6c6e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731663482970
2024-11-15T09:38:13,016 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 2635425bd0e1adb1194a8a56044a84fa#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-15T09:38:13,017 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/.tmp/info/7f9dfbb3583a433d82f5ce43fd96b98d is 1080, key is row0001/info:/1731663462908/Put/seqid=0
2024-11-15T09:38:13,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741841_1017 (size=8296)
2024-11-15T09:38:13,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741841_1017 (size=8296)
2024-11-15T09:38:13,036 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/.tmp/info/7f9dfbb3583a433d82f5ce43fd96b98d as hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/7f9dfbb3583a433d82f5ce43fd96b98d
2024-11-15T09:38:13,043 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 2635425bd0e1adb1194a8a56044a84fa/info of 2635425bd0e1adb1194a8a56044a84fa into 7f9dfbb3583a433d82f5ce43fd96b98d(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-15T09:38:13,043 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 2635425bd0e1adb1194a8a56044a84fa:
2024-11-15T09:38:13,045 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C37281%2C1731663451600.1731663493045
2024-11-15T09:38:13,053 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T09:38:13,053 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T09:38:13,053 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T09:38:13,054 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T09:38:13,054 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T09:38:13,054 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/WALs/791f12959b23,37281,1731663451600/791f12959b23%2C37281%2C1731663451600.1731663482972 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/WALs/791f12959b23,37281,1731663451600/791f12959b23%2C37281%2C1731663451600.1731663493045
2024-11-15T09:38:13,054 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46803:46803),(127.0.0.1/127.0.0.1:45865:45865)]
2024-11-15T09:38:13,054 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/WALs/791f12959b23,37281,1731663451600/791f12959b23%2C37281%2C1731663451600.1731663482972 is not closed yet, will try archiving it next time
2024-11-15T09:38:13,055 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/WALs/791f12959b23,37281,1731663451600/791f12959b23%2C37281%2C1731663451600.1731663452212 to hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/oldWALs/791f12959b23%2C37281%2C1731663451600.1731663452212
2024-11-15T09:38:13,055 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-15T09:38:13,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-15T09:38:13,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-15T09:38:13,058 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-15T09:38:13,059 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-15T09:38:13,059 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-15T09:38:13,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741839_1015 (size=2520)
2024-11-15T09:38:13,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741839_1015 (size=2520)
2024-11-15T09:38:13,212 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37281 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-15T09:38:13,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa.
2024-11-15T09:38:13,212 INFO [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 2635425bd0e1adb1194a8a56044a84fa 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-15T09:38:13,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/.tmp/info/82836f3c57af49ff9df33dd0d627fd04 is 1080, key is row0000/info:/1731663493044/Put/seqid=0
2024-11-15T09:38:13,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741843_1019 (size=6033)
2024-11-15T09:38:13,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741843_1019 (size=6033)
2024-11-15T09:38:13,224 INFO [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/.tmp/info/82836f3c57af49ff9df33dd0d627fd04
2024-11-15T09:38:13,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/.tmp/info/82836f3c57af49ff9df33dd0d627fd04 as hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/82836f3c57af49ff9df33dd0d627fd04
2024-11-15T09:38:13,236 INFO [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/82836f3c57af49ff9df33dd0d627fd04, entries=1, sequenceid=18, filesize=5.9 K
2024-11-15T09:38:13,237 INFO [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 2635425bd0e1adb1194a8a56044a84fa in 25ms, sequenceid=18, compaction requested=false
2024-11-15T09:38:13,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 2635425bd0e1adb1194a8a56044a84fa:
2024-11-15T09:38:13,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa.
2024-11-15T09:38:13,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-15T09:38:13,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-15T09:38:13,242 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-15T09:38:13,242 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 180 msec 2024-11-15T09:38:13,245 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 188 msec 2024-11-15T09:38:13,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:38:13,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-15T09:38:18,185 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 2635425bd0e1adb1194a8a56044a84fa, had cached 0 bytes from a total of 14329
2024-11-15T09:38:22,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-15T09:38:23,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36195 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-15T09:38:23,078 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-15T09:38:23,080 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C37281%2C1731663451600.1731663503080 2024-11-15T09:38:23,085 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,085 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,085 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,085 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,085 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,086 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/WALs/791f12959b23,37281,1731663451600/791f12959b23%2C37281%2C1731663451600.1731663493045 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/WALs/791f12959b23,37281,1731663451600/791f12959b23%2C37281%2C1731663451600.1731663503080 2024-11-15T09:38:23,086 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45865:45865),(127.0.0.1/127.0.0.1:46803:46803)] 2024-11-15T09:38:23,086 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/WALs/791f12959b23,37281,1731663451600/791f12959b23%2C37281%2C1731663451600.1731663493045 is not closed yet, will try archiving it next time 2024-11-15T09:38:23,086 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/WALs/791f12959b23,37281,1731663451600/791f12959b23%2C37281%2C1731663451600.1731663482972 to hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/oldWALs/791f12959b23%2C37281%2C1731663451600.1731663482972 2024-11-15T09:38:23,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T09:38:23,087 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T09:38:23,087 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:38:23,087 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:38:23,087 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:38:23,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741842_1018 (size=2026) 2024-11-15T09:38:23,087 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T09:38:23,087 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T09:38:23,087 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=905303875, stopped=false 2024-11-15T09:38:23,087 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=791f12959b23,36195,1731663451412 2024-11-15T09:38:23,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741842_1018 (size=2026) 2024-11-15T09:38:23,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T09:38:23,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T09:38:23,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:38:23,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:38:23,169 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T09:38:23,169 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
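[Editor's note] The call stack logged just above confirms the shutdown is driven by the ordinary JUnit tear-down path: AbstractTestLogRolling.tearDown() calls HBaseTestingUtil.shutdownMiniCluster(), which closes the async connection and then stops the master, region server, DFS and ZooKeeper, as the records that follow show. Roughly, and assuming the conventional TEST_UTIL field name (the stack trace does not show the field), the tear-down looks like:

    import org.junit.After;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    // TEST_UTIL is an assumed field name; only tearDown() -> shutdownMiniCluster()
    // is confirmed by the call stack above.
    static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

    @After
    public void tearDown() throws Exception {
      TEST_UTIL.shutdownMiniCluster();
    }
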
2024-11-15T09:38:23,169 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:38:23,169 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:38:23,169 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:38:23,169 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:38:23,170 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '791f12959b23,37281,1731663451600' ***** 2024-11-15T09:38:23,170 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T09:38:23,170 INFO [RS:0;791f12959b23:37281 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T09:38:23,170 INFO [RS:0;791f12959b23:37281 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T09:38:23,170 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T09:38:23,170 INFO [RS:0;791f12959b23:37281 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T09:38:23,170 INFO [RS:0;791f12959b23:37281 {}] regionserver.HRegionServer(3091): Received CLOSE for 2635425bd0e1adb1194a8a56044a84fa 2024-11-15T09:38:23,171 INFO [RS:0;791f12959b23:37281 {}] regionserver.HRegionServer(959): stopping server 791f12959b23,37281,1731663451600 2024-11-15T09:38:23,171 INFO [RS:0;791f12959b23:37281 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T09:38:23,171 INFO [RS:0;791f12959b23:37281 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;791f12959b23:37281. 
2024-11-15T09:38:23,171 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 2635425bd0e1adb1194a8a56044a84fa, disabling compactions & flushes 2024-11-15T09:38:23,171 DEBUG [RS:0;791f12959b23:37281 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:38:23,171 DEBUG [RS:0;791f12959b23:37281 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:38:23,171 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa. 2024-11-15T09:38:23,171 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa. 2024-11-15T09:38:23,171 INFO [RS:0;791f12959b23:37281 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T09:38:23,171 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa. after waiting 0 ms 2024-11-15T09:38:23,171 INFO [RS:0;791f12959b23:37281 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T09:38:23,171 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa. 2024-11-15T09:38:23,171 INFO [RS:0;791f12959b23:37281 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-15T09:38:23,171 INFO [RS:0;791f12959b23:37281 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T09:38:23,171 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 2635425bd0e1adb1194a8a56044a84fa 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-15T09:38:23,171 INFO [RS:0;791f12959b23:37281 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-15T09:38:23,172 DEBUG [RS:0;791f12959b23:37281 {}] regionserver.HRegionServer(1325): Online Regions={2635425bd0e1adb1194a8a56044a84fa=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa., 1588230740=hbase:meta,,1.1588230740} 2024-11-15T09:38:23,172 DEBUG [RS:0;791f12959b23:37281 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 2635425bd0e1adb1194a8a56044a84fa 2024-11-15T09:38:23,172 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T09:38:23,172 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T09:38:23,172 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T09:38:23,172 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T09:38:23,172 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T09:38:23,172 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-15T09:38:23,177 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/.tmp/info/2481ec9714a1453a8e98614779608879 is 1080, key is row0001/info:/1731663503079/Put/seqid=0 2024-11-15T09:38:23,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741845_1021 (size=6033) 2024-11-15T09:38:23,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741845_1021 (size=6033) 2024-11-15T09:38:23,183 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/.tmp/info/2481ec9714a1453a8e98614779608879 2024-11-15T09:38:23,193 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740/.tmp/info/8ec73fdf91f341899d521f2551340f77 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa./info:regioninfo/1731663453204/Put/seqid=0 2024-11-15T09:38:23,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741846_1022 (size=7308) 2024-11-15T09:38:23,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741846_1022 (size=7308) 2024-11-15T09:38:23,198 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740/.tmp/info/8ec73fdf91f341899d521f2551340f77 2024-11-15T09:38:23,200 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/.tmp/info/2481ec9714a1453a8e98614779608879 as hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/2481ec9714a1453a8e98614779608879 2024-11-15T09:38:23,209 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/2481ec9714a1453a8e98614779608879, entries=1, sequenceid=22, filesize=5.9 K 2024-11-15T09:38:23,211 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 2635425bd0e1adb1194a8a56044a84fa in 40ms, sequenceid=22, compaction requested=true 2024-11-15T09:38:23,216 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/b5d2e5ccd1804c1180132e001a6fa0d4, hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/21ebc45527304588aae6aa882309f40e, hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/4c7fa978f4a14c5a86e21eeaa8ec6c6e] to archive 2024-11-15T09:38:23,217 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
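[Editor's note] The "Moving the files [...] to archive" record above covers store files that an earlier compaction in the test made obsolete; they are retained as "compacted" files and, as the following records show, handed to the HFileArchiver under archive/ when the store closes. The compaction itself can be requested from a test through the Admin API, for example (a sketch, not the test's literal code; TEST_UTIL is the same assumed mini-cluster handle):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    // After several flushes have produced small store files, a major compaction
    // rewrites them into one file; the superseded files linger until safe to drop
    // and are archived at store close, as logged here.
    try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
      admin.majorCompact(
          TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
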
2024-11-15T09:38:23,219 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/b5d2e5ccd1804c1180132e001a6fa0d4 to hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/b5d2e5ccd1804c1180132e001a6fa0d4 2024-11-15T09:38:23,219 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740/.tmp/ns/8a9839df23234f569d33de72ea3ca2e9 is 43, key is default/ns:d/1731663452754/Put/seqid=0 2024-11-15T09:38:23,220 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/21ebc45527304588aae6aa882309f40e to hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/21ebc45527304588aae6aa882309f40e 2024-11-15T09:38:23,222 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/4c7fa978f4a14c5a86e21eeaa8ec6c6e to hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/info/4c7fa978f4a14c5a86e21eeaa8ec6c6e 2024-11-15T09:38:23,223 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=791f12959b23:36195 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-15T09:38:23,223 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [b5d2e5ccd1804c1180132e001a6fa0d4=6033, 21ebc45527304588aae6aa882309f40e=6033, 4c7fa978f4a14c5a86e21eeaa8ec6c6e=6033] 2024-11-15T09:38:23,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741847_1023 (size=5153) 2024-11-15T09:38:23,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741847_1023 (size=5153) 2024-11-15T09:38:23,227 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740/.tmp/ns/8a9839df23234f569d33de72ea3ca2e9 2024-11-15T09:38:23,228 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2635425bd0e1adb1194a8a56044a84fa/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-15T09:38:23,229 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa. 2024-11-15T09:38:23,229 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 2635425bd0e1adb1194a8a56044a84fa: Waiting for close lock at 1731663503171Running coprocessor pre-close hooks at 1731663503171Disabling compacts and flushes for region at 1731663503171Disabling writes for close at 1731663503171Obtaining lock to block concurrent updates at 1731663503171Preparing flush snapshotting stores in 2635425bd0e1adb1194a8a56044a84fa at 1731663503171Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731663503172 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa. 
at 1731663503173 (+1 ms)Flushing 2635425bd0e1adb1194a8a56044a84fa/info: creating writer at 1731663503173Flushing 2635425bd0e1adb1194a8a56044a84fa/info: appending metadata at 1731663503176 (+3 ms)Flushing 2635425bd0e1adb1194a8a56044a84fa/info: closing flushed file at 1731663503176Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@573058d8: reopening flushed file at 1731663503199 (+23 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 2635425bd0e1adb1194a8a56044a84fa in 40ms, sequenceid=22, compaction requested=true at 1731663503211 (+12 ms)Writing region close event to WAL at 1731663503224 (+13 ms)Running coprocessor post-close hooks at 1731663503229 (+5 ms)Closed at 1731663503229 2024-11-15T09:38:23,229 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731663452839.2635425bd0e1adb1194a8a56044a84fa. 2024-11-15T09:38:23,245 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740/.tmp/table/4095f5f8e3db4c23aa6701ab90bde127 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731663453216/Put/seqid=0 2024-11-15T09:38:23,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741848_1024 (size=5508) 2024-11-15T09:38:23,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741848_1024 (size=5508) 2024-11-15T09:38:23,252 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740/.tmp/table/4095f5f8e3db4c23aa6701ab90bde127 2024-11-15T09:38:23,258 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740/.tmp/info/8ec73fdf91f341899d521f2551340f77 as hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740/info/8ec73fdf91f341899d521f2551340f77 2024-11-15T09:38:23,264 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740/info/8ec73fdf91f341899d521f2551340f77, entries=10, sequenceid=11, filesize=7.1 K 2024-11-15T09:38:23,265 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740/.tmp/ns/8a9839df23234f569d33de72ea3ca2e9 as hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740/ns/8a9839df23234f569d33de72ea3ca2e9 2024-11-15T09:38:23,271 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740/ns/8a9839df23234f569d33de72ea3ca2e9, entries=2, sequenceid=11, filesize=5.0 K 2024-11-15T09:38:23,272 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740/.tmp/table/4095f5f8e3db4c23aa6701ab90bde127 as hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740/table/4095f5f8e3db4c23aa6701ab90bde127 2024-11-15T09:38:23,277 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740/table/4095f5f8e3db4c23aa6701ab90bde127, entries=2, sequenceid=11, filesize=5.4 K 2024-11-15T09:38:23,278 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 106ms, sequenceid=11, compaction requested=false 2024-11-15T09:38:23,283 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-15T09:38:23,283 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T09:38:23,283 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T09:38:23,283 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731663503172Running coprocessor pre-close hooks at 1731663503172Disabling compacts and flushes for region at 1731663503172Disabling writes for close at 1731663503172Obtaining lock to block concurrent updates at 1731663503172Preparing flush snapshotting stores in 1588230740 at 1731663503172Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731663503173 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731663503173Flushing 1588230740/info: creating writer at 1731663503173Flushing 1588230740/info: appending metadata at 1731663503192 (+19 ms)Flushing 1588230740/info: closing flushed file at 1731663503192Flushing 1588230740/ns: creating writer at 1731663503203 (+11 ms)Flushing 1588230740/ns: appending metadata at 1731663503219 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731663503219Flushing 1588230740/table: creating writer at 1731663503232 (+13 ms)Flushing 1588230740/table: appending metadata at 1731663503245 (+13 ms)Flushing 1588230740/table: closing flushed file at 1731663503245Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@191fc61d: reopening flushed file at 1731663503258 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19bea8c4: reopening flushed file at 1731663503264 (+6 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7385dbb6: reopening flushed file at 1731663503271 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 106ms, sequenceid=11, compaction requested=false at 1731663503279 (+8 ms)Writing region close event to WAL at 1731663503280 (+1 ms)Running coprocessor post-close hooks at 1731663503283 (+3 ms)Closed at 1731663503283 2024-11-15T09:38:23,284 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T09:38:23,372 INFO [RS:0;791f12959b23:37281 {}] regionserver.HRegionServer(976): stopping server 791f12959b23,37281,1731663451600; all regions closed. 2024-11-15T09:38:23,373 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,373 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,373 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,373 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,373 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741834_1010 (size=3306) 2024-11-15T09:38:23,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741834_1010 (size=3306) 2024-11-15T09:38:23,380 DEBUG [RS:0;791f12959b23:37281 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/oldWALs 2024-11-15T09:38:23,380 INFO [RS:0;791f12959b23:37281 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 791f12959b23%2C37281%2C1731663451600.meta:.meta(num 1731663452642) 2024-11-15T09:38:23,380 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,380 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,381 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,381 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,381 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741844_1020 (size=1252) 2024-11-15T09:38:23,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741844_1020 (size=1252) 2024-11-15T09:38:23,386 DEBUG [RS:0;791f12959b23:37281 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/oldWALs 2024-11-15T09:38:23,386 INFO [RS:0;791f12959b23:37281 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 791f12959b23%2C37281%2C1731663451600:(num 1731663503080) 2024-11-15T09:38:23,386 DEBUG [RS:0;791f12959b23:37281 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:38:23,386 INFO [RS:0;791f12959b23:37281 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T09:38:23,386 INFO [RS:0;791f12959b23:37281 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T09:38:23,386 INFO [RS:0;791f12959b23:37281 {}] hbase.ChoreService(370): Chore service for: regionserver/791f12959b23:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-15T09:38:23,386 INFO [RS:0;791f12959b23:37281 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T09:38:23,386 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T09:38:23,387 INFO [RS:0;791f12959b23:37281 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37281 2024-11-15T09:38:23,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/791f12959b23,37281,1731663451600 2024-11-15T09:38:23,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T09:38:23,400 INFO [RS:0;791f12959b23:37281 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T09:38:23,412 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [791f12959b23,37281,1731663451600] 2024-11-15T09:38:23,422 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/791f12959b23,37281,1731663451600 already deleted, retry=false 2024-11-15T09:38:23,422 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 791f12959b23,37281,1731663451600 expired; onlineServers=0 2024-11-15T09:38:23,422 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '791f12959b23,36195,1731663451412' ***** 2024-11-15T09:38:23,422 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T09:38:23,423 INFO [M:0;791f12959b23:36195 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T09:38:23,423 INFO [M:0;791f12959b23:36195 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T09:38:23,423 DEBUG [M:0;791f12959b23:36195 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T09:38:23,423 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T09:38:23,423 DEBUG [M:0;791f12959b23:36195 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T09:38:23,423 DEBUG [master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663451960 {}] cleaner.HFileCleaner(306): Exit Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663451960,5,FailOnTimeoutGroup] 2024-11-15T09:38:23,423 DEBUG [master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663451960 {}] cleaner.HFileCleaner(306): Exit Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663451960,5,FailOnTimeoutGroup] 2024-11-15T09:38:23,423 INFO [M:0;791f12959b23:36195 {}] hbase.ChoreService(370): Chore service for: master/791f12959b23:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T09:38:23,423 INFO [M:0;791f12959b23:36195 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T09:38:23,423 DEBUG [M:0;791f12959b23:36195 {}] master.HMaster(1795): Stopping service threads 2024-11-15T09:38:23,423 INFO [M:0;791f12959b23:36195 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T09:38:23,424 INFO [M:0;791f12959b23:36195 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T09:38:23,424 INFO [M:0;791f12959b23:36195 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T09:38:23,424 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T09:38:23,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T09:38:23,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:38:23,433 DEBUG [M:0;791f12959b23:36195 {}] zookeeper.ZKUtil(347): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T09:38:23,433 WARN [M:0;791f12959b23:36195 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T09:38:23,434 INFO [M:0;791f12959b23:36195 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/.lastflushedseqids 2024-11-15T09:38:23,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741849_1025 (size=130) 2024-11-15T09:38:23,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741849_1025 (size=130) 2024-11-15T09:38:23,440 INFO [M:0;791f12959b23:36195 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T09:38:23,440 INFO [M:0;791f12959b23:36195 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T09:38:23,440 DEBUG [M:0;791f12959b23:36195 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T09:38:23,440 INFO [M:0;791f12959b23:36195 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:38:23,440 DEBUG [M:0;791f12959b23:36195 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:38:23,440 DEBUG [M:0;791f12959b23:36195 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T09:38:23,440 DEBUG [M:0;791f12959b23:36195 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:38:23,440 INFO [M:0;791f12959b23:36195 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.55 KB heapSize=54.94 KB 2024-11-15T09:38:23,455 DEBUG [M:0;791f12959b23:36195 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c5862e25cc584ce3a15c2819f9a46e69 is 82, key is hbase:meta,,1/info:regioninfo/1731663452672/Put/seqid=0 2024-11-15T09:38:23,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741850_1026 (size=5672) 2024-11-15T09:38:23,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741850_1026 (size=5672) 2024-11-15T09:38:23,460 INFO [M:0;791f12959b23:36195 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c5862e25cc584ce3a15c2819f9a46e69 2024-11-15T09:38:23,481 DEBUG [M:0;791f12959b23:36195 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1c2ece72e0ae44de8c6372c14918ae08 is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731663453221/Put/seqid=0 2024-11-15T09:38:23,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741851_1027 (size=7819) 2024-11-15T09:38:23,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741851_1027 (size=7819) 2024-11-15T09:38:23,490 INFO [M:0;791f12959b23:36195 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.95 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1c2ece72e0ae44de8c6372c14918ae08 2024-11-15T09:38:23,495 INFO [M:0;791f12959b23:36195 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1c2ece72e0ae44de8c6372c14918ae08 2024-11-15T09:38:23,511 DEBUG [M:0;791f12959b23:36195 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a82c0d94d96f499daa80ab769600bddb is 69, key is 791f12959b23,37281,1731663451600/rs:state/1731663452055/Put/seqid=0 2024-11-15T09:38:23,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:38:23,512 INFO [RS:0;791f12959b23:37281 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T09:38:23,512 INFO [RS:0;791f12959b23:37281 {}] regionserver.HRegionServer(1031): Exiting; stopping=791f12959b23,37281,1731663451600; zookeeper connection closed. 2024-11-15T09:38:23,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37281-0x1013ddb50ad0001, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:38:23,512 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@73fc3392 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@73fc3392 2024-11-15T09:38:23,513 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T09:38:23,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741852_1028 (size=5156) 2024-11-15T09:38:23,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741852_1028 (size=5156) 2024-11-15T09:38:23,515 INFO [M:0;791f12959b23:36195 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a82c0d94d96f499daa80ab769600bddb 2024-11-15T09:38:23,535 DEBUG [M:0;791f12959b23:36195 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/98ceab1a13dc49ca90ee370ef9ae1b30 is 52, key is load_balancer_on/state:d/1731663452834/Put/seqid=0 2024-11-15T09:38:23,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741853_1029 (size=5056) 2024-11-15T09:38:23,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741853_1029 (size=5056) 2024-11-15T09:38:23,542 INFO [M:0;791f12959b23:36195 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/98ceab1a13dc49ca90ee370ef9ae1b30 2024-11-15T09:38:23,548 DEBUG [M:0;791f12959b23:36195 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c5862e25cc584ce3a15c2819f9a46e69 as 
hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c5862e25cc584ce3a15c2819f9a46e69 2024-11-15T09:38:23,553 INFO [M:0;791f12959b23:36195 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c5862e25cc584ce3a15c2819f9a46e69, entries=8, sequenceid=121, filesize=5.5 K 2024-11-15T09:38:23,554 DEBUG [M:0;791f12959b23:36195 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1c2ece72e0ae44de8c6372c14918ae08 as hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1c2ece72e0ae44de8c6372c14918ae08 2024-11-15T09:38:23,560 INFO [M:0;791f12959b23:36195 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1c2ece72e0ae44de8c6372c14918ae08 2024-11-15T09:38:23,560 INFO [M:0;791f12959b23:36195 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1c2ece72e0ae44de8c6372c14918ae08, entries=14, sequenceid=121, filesize=7.6 K 2024-11-15T09:38:23,561 DEBUG [M:0;791f12959b23:36195 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a82c0d94d96f499daa80ab769600bddb as hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a82c0d94d96f499daa80ab769600bddb 2024-11-15T09:38:23,567 INFO [M:0;791f12959b23:36195 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a82c0d94d96f499daa80ab769600bddb, entries=1, sequenceid=121, filesize=5.0 K 2024-11-15T09:38:23,568 DEBUG [M:0;791f12959b23:36195 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/98ceab1a13dc49ca90ee370ef9ae1b30 as hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/98ceab1a13dc49ca90ee370ef9ae1b30 2024-11-15T09:38:23,574 INFO [M:0;791f12959b23:36195 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42325/user/jenkins/test-data/494470ef-d57f-8f8a-619b-7f0b93926cc6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/98ceab1a13dc49ca90ee370ef9ae1b30, entries=1, sequenceid=121, filesize=4.9 K 2024-11-15T09:38:23,575 INFO [M:0;791f12959b23:36195 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.55 KB/44599, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=121, compaction requested=false 2024-11-15T09:38:23,580 INFO [M:0;791f12959b23:36195 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T09:38:23,580 DEBUG [M:0;791f12959b23:36195 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731663503440Disabling compacts and flushes for region at 1731663503440Disabling writes for close at 1731663503440Obtaining lock to block concurrent updates at 1731663503440Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731663503440Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44599, getHeapSize=56192, getOffHeapSize=0, getCellsCount=140 at 1731663503441 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731663503441Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731663503441Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731663503454 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731663503454Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731663503464 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731663503481 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731663503481Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731663503495 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731663503510 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731663503510Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731663503520 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731663503534 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731663503534Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1971a590: reopening flushed file at 1731663503547 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1420b05f: reopening flushed file at 1731663503553 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@611a9dd2: reopening flushed file at 1731663503560 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@366b211: reopening flushed file at 1731663503567 (+7 ms)Finished flush of dataSize ~43.55 KB/44599, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=121, compaction requested=false at 1731663503575 (+8 ms)Writing region close event to WAL at 1731663503580 (+5 ms)Closed at 1731663503580 2024-11-15T09:38:23,580 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,581 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,581 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,581 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,581 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:38:23,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39609 is added to blk_1073741830_1006 (size=52996) 2024-11-15T09:38:23,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32991 is added to blk_1073741830_1006 (size=52996) 2024-11-15T09:38:23,583 INFO [M:0;791f12959b23:36195 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-15T09:38:23,583 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T09:38:23,583 INFO [M:0;791f12959b23:36195 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36195 2024-11-15T09:38:23,584 INFO [M:0;791f12959b23:36195 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T09:38:23,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:38:23,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36195-0x1013ddb50ad0000, quorum=127.0.0.1:61704, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:38:23,691 INFO [M:0;791f12959b23:36195 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T09:38:23,693 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2526c219{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:38:23,694 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@74e6f5d9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:38:23,694 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:38:23,694 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4bd1d692{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:38:23,694 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10c2896a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/hadoop.log.dir/,STOPPED} 2024-11-15T09:38:23,696 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T09:38:23,696 WARN [BP-783467009-172.17.0.2-1731663449263 heartbeating to localhost/127.0.0.1:42325 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:38:23,696 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:38:23,696 WARN [BP-783467009-172.17.0.2-1731663449263 heartbeating to localhost/127.0.0.1:42325 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-783467009-172.17.0.2-1731663449263 (Datanode Uuid d9041e75-1b50-49ef-a0a4-5f0e36569664) service to localhost/127.0.0.1:42325 2024-11-15T09:38:23,696 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/cluster_f6288a7c-0531-088b-9fd8-3c975b07d6df/data/data3/current/BP-783467009-172.17.0.2-1731663449263 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:38:23,697 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/cluster_f6288a7c-0531-088b-9fd8-3c975b07d6df/data/data4/current/BP-783467009-172.17.0.2-1731663449263 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:38:23,697 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:38:23,701 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43d16ee8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:38:23,701 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5b135886{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:38:23,701 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:38:23,701 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@439cbd97{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:38:23,701 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2cfed7b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/hadoop.log.dir/,STOPPED} 2024-11-15T09:38:23,703 WARN [BP-783467009-172.17.0.2-1731663449263 heartbeating to localhost/127.0.0.1:42325 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:38:23,703 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
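Once the datanodes and the mini ZooKeeper cluster go down, the ResourceChecker lines that follow compare thread, file-descriptor, load and memory figures taken before and after the test to flag potential leaks (for example "Thread=205 (was 179)"). The sketch below is not HBase's ResourceChecker; it is only a minimal, JDK-only illustration of how such a before/after snapshot of thread count and system load can be taken.

    import java.lang.management.ManagementFactory;

    public class ResourceSnapshotSketch {
      public static void main(String[] args) {
        // Current live thread count in this JVM.
        int threads = ManagementFactory.getThreadMXBean().getThreadCount();
        // One-minute system load average, or -1.0 if the platform cannot report it.
        double load = ManagementFactory.getOperatingSystemMXBean().getSystemLoadAverage();
        System.out.println("Thread=" + threads + ", SystemLoadAverage=" + load);
      }
    }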
2024-11-15T09:38:23,703 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:38:23,703 WARN [BP-783467009-172.17.0.2-1731663449263 heartbeating to localhost/127.0.0.1:42325 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-783467009-172.17.0.2-1731663449263 (Datanode Uuid fe1cdc2e-f1d4-44d3-9d3a-0ac1e13bd12c) service to localhost/127.0.0.1:42325 2024-11-15T09:38:23,704 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/cluster_f6288a7c-0531-088b-9fd8-3c975b07d6df/data/data1/current/BP-783467009-172.17.0.2-1731663449263 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:38:23,704 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/cluster_f6288a7c-0531-088b-9fd8-3c975b07d6df/data/data2/current/BP-783467009-172.17.0.2-1731663449263 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:38:23,704 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:38:23,710 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d483d07{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T09:38:23,710 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1106c0e7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:38:23,710 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:38:23,710 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ac76b28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:38:23,711 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@293e66d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/hadoop.log.dir/,STOPPED} 2024-11-15T09:38:23,717 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T09:38:23,745 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T09:38:23,754 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 179) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/791f12959b23:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42325 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42325 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42325 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:42325 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42325 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42325 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42325 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42325 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=204 (was 291), ProcessCount=11 (was 11), AvailableMemoryMB=3125 (was 3244) 2024-11-15T09:38:23,763 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=204, ProcessCount=11, AvailableMemoryMB=3125 2024-11-15T09:38:23,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T09:38:23,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/hadoop.log.dir so I do NOT create it in target/test-data/871613f3-4971-f54f-5184-547f5dabb95e 2024-11-15T09:38:23,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ec62f18-fff2-9b11-679e-f33f17caea49/hadoop.tmp.dir so I do NOT create it in target/test-data/871613f3-4971-f54f-5184-547f5dabb95e 2024-11-15T09:38:23,763 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/cluster_eb774146-251b-089f-6068-5cec19bf491c, deleteOnExit=true 2024-11-15T09:38:23,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T09:38:23,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/test.cache.data in system properties and HBase conf 2024-11-15T09:38:23,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T09:38:23,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/hadoop.log.dir in system properties and HBase conf 2024-11-15T09:38:23,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T09:38:23,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T09:38:23,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T09:38:23,764 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-15T09:38:23,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T09:38:23,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T09:38:23,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T09:38:23,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T09:38:23,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T09:38:23,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T09:38:23,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T09:38:23,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T09:38:23,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T09:38:23,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/nfs.dump.dir in system properties and HBase conf 2024-11-15T09:38:23,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/java.io.tmpdir in system properties and HBase conf 2024-11-15T09:38:23,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T09:38:23,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T09:38:23,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T09:38:23,780 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T09:38:23,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:23,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:24,078 INFO [regionserver/791f12959b23:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T09:38:24,247 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:38:24,252 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:38:24,253 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:38:24,253 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:38:24,253 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:38:24,254 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:38:24,254 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2996c87f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:38:24,255 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bc081d8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:38:24,357 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@551592b1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/java.io.tmpdir/jetty-localhost-46095-hadoop-hdfs-3_4_1-tests_jar-_-any-10715600214758975003/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T09:38:24,358 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e1b4695{HTTP/1.1, (http/1.1)}{localhost:46095} 2024-11-15T09:38:24,358 INFO [Time-limited test {}] server.Server(415): Started @250791ms 2024-11-15T09:38:24,369 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T09:38:24,654 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:38:24,657 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:38:24,658 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:38:24,658 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:38:24,658 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T09:38:24,658 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37b300d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:38:24,658 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77b370f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:38:24,763 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b32401d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/java.io.tmpdir/jetty-localhost-45257-hadoop-hdfs-3_4_1-tests_jar-_-any-11194181112720338967/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:38:24,764 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@60b9b83d{HTTP/1.1, (http/1.1)}{localhost:45257} 2024-11-15T09:38:24,764 INFO [Time-limited test {}] server.Server(415): Started @251197ms 2024-11-15T09:38:24,765 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:38:24,794 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:38:24,796 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:38:24,797 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:38:24,797 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:38:24,797 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:38:24,797 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47c8059{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:38:24,797 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d4c2da4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:38:24,903 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3323ea67{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/java.io.tmpdir/jetty-localhost-42795-hadoop-hdfs-3_4_1-tests_jar-_-any-14172782458079923525/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:38:24,903 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2fc2e7d1{HTTP/1.1, (http/1.1)}{localhost:42795} 2024-11-15T09:38:24,903 INFO [Time-limited test {}] server.Server(415): Started @251336ms 2024-11-15T09:38:24,904 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:38:24,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:24,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:38:25,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:25,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:25,992 WARN [Thread-1963 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/cluster_eb774146-251b-089f-6068-5cec19bf491c/data/data1/current/BP-253373432-172.17.0.2-1731663503792/current, will proceed with Du for space computation calculation, 2024-11-15T09:38:25,992 WARN [Thread-1964 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/cluster_eb774146-251b-089f-6068-5cec19bf491c/data/data2/current/BP-253373432-172.17.0.2-1731663503792/current, will proceed with Du for space computation calculation, 2024-11-15T09:38:26,007 WARN [Thread-1927 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T09:38:26,009 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe217ecee17534c9 with lease ID 0x9cb738f43b1dda0e: Processing first storage report for DS-6b219575-f3b5-42c6-b529-92660701c52f from datanode DatanodeRegistration(127.0.0.1:35603, datanodeUuid=7ccca9ce-ce10-4210-b4d7-f84f5097cfbe, infoPort=43221, infoSecurePort=0, ipcPort=41711, storageInfo=lv=-57;cid=testClusterID;nsid=1151722454;c=1731663503792) 2024-11-15T09:38:26,009 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe217ecee17534c9 with lease ID 0x9cb738f43b1dda0e: from storage DS-6b219575-f3b5-42c6-b529-92660701c52f node DatanodeRegistration(127.0.0.1:35603, datanodeUuid=7ccca9ce-ce10-4210-b4d7-f84f5097cfbe, infoPort=43221, infoSecurePort=0, ipcPort=41711, storageInfo=lv=-57;cid=testClusterID;nsid=1151722454;c=1731663503792), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:38:26,009 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe217ecee17534c9 with lease ID 0x9cb738f43b1dda0e: Processing first storage report for DS-aecc712a-3dd3-4b22-bdd1-b0f0dc8b119b from datanode DatanodeRegistration(127.0.0.1:35603, datanodeUuid=7ccca9ce-ce10-4210-b4d7-f84f5097cfbe, infoPort=43221, infoSecurePort=0, ipcPort=41711, storageInfo=lv=-57;cid=testClusterID;nsid=1151722454;c=1731663503792) 2024-11-15T09:38:26,009 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe217ecee17534c9 with lease ID 0x9cb738f43b1dda0e: from storage DS-aecc712a-3dd3-4b22-bdd1-b0f0dc8b119b node DatanodeRegistration(127.0.0.1:35603, datanodeUuid=7ccca9ce-ce10-4210-b4d7-f84f5097cfbe, infoPort=43221, infoSecurePort=0, ipcPort=41711, storageInfo=lv=-57;cid=testClusterID;nsid=1151722454;c=1731663503792), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:38:26,123 WARN [Thread-1974 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/cluster_eb774146-251b-089f-6068-5cec19bf491c/data/data3/current/BP-253373432-172.17.0.2-1731663503792/current, will proceed with Du for space computation calculation, 2024-11-15T09:38:26,123 WARN [Thread-1975 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/cluster_eb774146-251b-089f-6068-5cec19bf491c/data/data4/current/BP-253373432-172.17.0.2-1731663503792/current, will proceed with Du for space computation calculation, 2024-11-15T09:38:26,144 WARN [Thread-1950 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T09:38:26,146 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe4fc967ac272fa45 with lease ID 0x9cb738f43b1dda0f: Processing first storage report for DS-0d813220-6b4d-49ae-8c33-286128109f86 from datanode DatanodeRegistration(127.0.0.1:35841, datanodeUuid=778bcdf5-dacb-4a27-abdc-69e3fd2cfb52, infoPort=36185, infoSecurePort=0, ipcPort=40499, storageInfo=lv=-57;cid=testClusterID;nsid=1151722454;c=1731663503792) 2024-11-15T09:38:26,146 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4fc967ac272fa45 with lease ID 0x9cb738f43b1dda0f: from storage DS-0d813220-6b4d-49ae-8c33-286128109f86 node DatanodeRegistration(127.0.0.1:35841, datanodeUuid=778bcdf5-dacb-4a27-abdc-69e3fd2cfb52, infoPort=36185, infoSecurePort=0, ipcPort=40499, storageInfo=lv=-57;cid=testClusterID;nsid=1151722454;c=1731663503792), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:38:26,147 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe4fc967ac272fa45 with lease ID 0x9cb738f43b1dda0f: Processing first storage report for DS-38185d62-1836-473b-87ff-08d9a7af491f from datanode DatanodeRegistration(127.0.0.1:35841, datanodeUuid=778bcdf5-dacb-4a27-abdc-69e3fd2cfb52, infoPort=36185, infoSecurePort=0, ipcPort=40499, storageInfo=lv=-57;cid=testClusterID;nsid=1151722454;c=1731663503792) 2024-11-15T09:38:26,147 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4fc967ac272fa45 with lease ID 0x9cb738f43b1dda0f: from storage DS-38185d62-1836-473b-87ff-08d9a7af491f node DatanodeRegistration(127.0.0.1:35841, datanodeUuid=778bcdf5-dacb-4a27-abdc-69e3fd2cfb52, infoPort=36185, infoSecurePort=0, ipcPort=40499, storageInfo=lv=-57;cid=testClusterID;nsid=1151722454;c=1731663503792), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:38:26,233 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e 2024-11-15T09:38:26,236 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/cluster_eb774146-251b-089f-6068-5cec19bf491c/zookeeper_0, clientPort=54078, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/cluster_eb774146-251b-089f-6068-5cec19bf491c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/cluster_eb774146-251b-089f-6068-5cec19bf491c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T09:38:26,237 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54078 2024-11-15T09:38:26,237 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:38:26,239 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:38:26,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741825_1001 (size=7) 2024-11-15T09:38:26,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741825_1001 (size=7) 2024-11-15T09:38:26,249 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657 with version=8 2024-11-15T09:38:26,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/hbase-staging 2024-11-15T09:38:26,251 INFO [Time-limited test {}] client.ConnectionUtils(128): master/791f12959b23:0 server-side Connection retries=45 2024-11-15T09:38:26,251 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:38:26,251 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T09:38:26,251 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T09:38:26,251 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:38:26,251 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T09:38:26,251 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T09:38:26,251 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T09:38:26,252 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35857 2024-11-15T09:38:26,254 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35857 connecting to ZooKeeper ensemble=127.0.0.1:54078 2024-11-15T09:38:26,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:358570x0, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T09:38:26,333 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35857-0x1013ddc26e60000 connected 2024-11-15T09:38:26,415 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:38:26,417 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:38:26,420 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:38:26,420 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657, hbase.cluster.distributed=false 2024-11-15T09:38:26,423 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T09:38:26,423 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35857 2024-11-15T09:38:26,423 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35857 2024-11-15T09:38:26,424 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35857 2024-11-15T09:38:26,424 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35857 2024-11-15T09:38:26,424 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35857 2024-11-15T09:38:26,439 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/791f12959b23:0 server-side Connection retries=45 2024-11-15T09:38:26,439 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:38:26,439 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T09:38:26,440 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T09:38:26,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:38:26,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T09:38:26,440 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T09:38:26,440 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T09:38:26,440 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37741 2024-11-15T09:38:26,442 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37741 connecting to ZooKeeper ensemble=127.0.0.1:54078 2024-11-15T09:38:26,442 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:38:26,444 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:38:26,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:377410x0, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T09:38:26,457 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:377410x0, quorum=127.0.0.1:54078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:38:26,457 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37741-0x1013ddc26e60001 connected 2024-11-15T09:38:26,458 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T09:38:26,458 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T09:38:26,459 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T09:38:26,460 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T09:38:26,460 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37741 2024-11-15T09:38:26,460 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37741 2024-11-15T09:38:26,461 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37741 2024-11-15T09:38:26,461 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37741 2024-11-15T09:38:26,461 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37741 2024-11-15T09:38:26,476 DEBUG [M:0;791f12959b23:35857 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;791f12959b23:35857 2024-11-15T09:38:26,476 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/791f12959b23,35857,1731663506251 2024-11-15T09:38:26,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:38:26,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:38:26,489 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/791f12959b23,35857,1731663506251 2024-11-15T09:38:26,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:38:26,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T09:38:26,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:38:26,499 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T09:38:26,500 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/791f12959b23,35857,1731663506251 from backup master directory 2024-11-15T09:38:26,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/791f12959b23,35857,1731663506251 2024-11-15T09:38:26,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:38:26,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:38:26,510 WARN [master/791f12959b23:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-15T09:38:26,510 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=791f12959b23,35857,1731663506251 2024-11-15T09:38:26,515 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/hbase.id] with ID: dc934f2f-7761-4543-a102-3f25b17ee455 2024-11-15T09:38:26,515 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/.tmp/hbase.id 2024-11-15T09:38:26,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741826_1002 (size=42) 2024-11-15T09:38:26,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741826_1002 (size=42) 2024-11-15T09:38:26,522 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/.tmp/hbase.id]:[hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/hbase.id] 2024-11-15T09:38:26,537 INFO [master/791f12959b23:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:38:26,537 INFO [master/791f12959b23:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T09:38:26,539 INFO [master/791f12959b23:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-15T09:38:26,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:38:26,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:38:26,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741827_1003 (size=196) 2024-11-15T09:38:26,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741827_1003 (size=196) 2024-11-15T09:38:26,558 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T09:38:26,559 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T09:38:26,559 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:38:26,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741828_1004 (size=1189) 2024-11-15T09:38:26,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741828_1004 (size=1189) 2024-11-15T09:38:26,567 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store 2024-11-15T09:38:26,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741829_1005 (size=34) 2024-11-15T09:38:26,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741829_1005 (size=34) 2024-11-15T09:38:26,576 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:38:26,576 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T09:38:26,576 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:38:26,576 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:38:26,576 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T09:38:26,576 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:38:26,576 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T09:38:26,577 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731663506576Disabling compacts and flushes for region at 1731663506576Disabling writes for close at 1731663506576Writing region close event to WAL at 1731663506576Closed at 1731663506576 2024-11-15T09:38:26,577 WARN [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/.initializing 2024-11-15T09:38:26,578 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/WALs/791f12959b23,35857,1731663506251 2024-11-15T09:38:26,581 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C35857%2C1731663506251, suffix=, logDir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/WALs/791f12959b23,35857,1731663506251, archiveDir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/oldWALs, maxLogs=10 2024-11-15T09:38:26,581 INFO [master/791f12959b23:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C35857%2C1731663506251.1731663506581 2024-11-15T09:38:26,589 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/WALs/791f12959b23,35857,1731663506251/791f12959b23%2C35857%2C1731663506251.1731663506581 2024-11-15T09:38:26,594 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36185:36185),(127.0.0.1/127.0.0.1:43221:43221)] 2024-11-15T09:38:26,596 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T09:38:26,596 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:38:26,596 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:38:26,596 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:38:26,598 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:38:26,599 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T09:38:26,599 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:26,600 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:38:26,600 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:38:26,601 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T09:38:26,601 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:26,602 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:38:26,602 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:38:26,603 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T09:38:26,603 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:26,604 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:38:26,604 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:38:26,605 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T09:38:26,605 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:26,606 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:38:26,606 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:38:26,606 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:38:26,607 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:38:26,608 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:38:26,608 DEBUG [master/791f12959b23:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:38:26,608 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T09:38:26,609 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:38:26,612 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T09:38:26,613 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=735903, jitterRate=-0.06425142288208008}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T09:38:26,613 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731663506597Initializing all the Stores at 1731663506597Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663506597Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663506598 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663506598Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663506598Cleaning up temporary data from old regions at 1731663506608 (+10 ms)Region opened successfully at 1731663506613 (+5 ms) 2024-11-15T09:38:26,616 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T09:38:26,619 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30e9e36b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=791f12959b23/172.17.0.2:0 2024-11-15T09:38:26,620 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T09:38:26,620 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T09:38:26,620 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T09:38:26,620 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T09:38:26,621 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T09:38:26,621 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T09:38:26,621 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T09:38:26,627 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T09:38:26,628 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T09:38:26,636 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T09:38:26,636 INFO [master/791f12959b23:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T09:38:26,637 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T09:38:26,646 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T09:38:26,647 INFO [master/791f12959b23:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T09:38:26,648 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T09:38:26,657 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T09:38:26,658 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T09:38:26,667 DEBUG 
[master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T09:38:26,669 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T09:38:26,678 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T09:38:26,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T09:38:26,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T09:38:26,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:38:26,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:38:26,689 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=791f12959b23,35857,1731663506251, sessionid=0x1013ddc26e60000, setting cluster-up flag (Was=false) 2024-11-15T09:38:26,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:38:26,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:38:26,741 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T09:38:26,742 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=791f12959b23,35857,1731663506251 2024-11-15T09:38:26,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:38:26,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:38:26,794 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T09:38:26,795 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=791f12959b23,35857,1731663506251 2024-11-15T09:38:26,796 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T09:38:26,798 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T09:38:26,798 INFO [master/791f12959b23:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T09:38:26,798 INFO [master/791f12959b23:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T09:38:26,798 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 791f12959b23,35857,1731663506251 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T09:38:26,800 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:38:26,800 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:38:26,800 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:38:26,800 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:38:26,800 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/791f12959b23:0, corePoolSize=10, maxPoolSize=10 2024-11-15T09:38:26,800 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:38:26,800 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/791f12959b23:0, corePoolSize=2, maxPoolSize=2 2024-11-15T09:38:26,800 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/791f12959b23:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T09:38:26,801 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731663536801 2024-11-15T09:38:26,802 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T09:38:26,802 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T09:38:26,802 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T09:38:26,802 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T09:38:26,802 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T09:38:26,802 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T09:38:26,802 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:38:26,802 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T09:38:26,802 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-15T09:38:26,802 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T09:38:26,802 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T09:38:26,802 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T09:38:26,803 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T09:38:26,803 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T09:38:26,803 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:26,803 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T09:38:26,808 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663506803,5,FailOnTimeoutGroup] 2024-11-15T09:38:26,812 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663506808,5,FailOnTimeoutGroup] 2024-11-15T09:38:26,812 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T09:38:26,812 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T09:38:26,812 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T09:38:26,812 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-15T09:38:26,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741831_1007 (size=1321) 2024-11-15T09:38:26,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741831_1007 (size=1321) 2024-11-15T09:38:26,816 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T09:38:26,816 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657 2024-11-15T09:38:26,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741832_1008 (size=32) 2024-11-15T09:38:26,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741832_1008 (size=32) 2024-11-15T09:38:26,824 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:38:26,825 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T09:38:26,826 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T09:38:26,827 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:26,827 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:38:26,827 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T09:38:26,829 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T09:38:26,829 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:26,829 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:38:26,829 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T09:38:26,830 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T09:38:26,830 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:26,831 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:38:26,831 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T09:38:26,832 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T09:38:26,832 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:26,832 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:38:26,832 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T09:38:26,833 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740 2024-11-15T09:38:26,833 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740 2024-11-15T09:38:26,834 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T09:38:26,834 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T09:38:26,835 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T09:38:26,836 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T09:38:26,841 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T09:38:26,841 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=812565, jitterRate=0.03322996199131012}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T09:38:26,842 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731663506824Initializing all the Stores at 1731663506824Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663506824Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663506825 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663506825Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663506825Cleaning up temporary data from old regions at 1731663506834 (+9 ms)Region opened successfully at 1731663506842 (+8 ms) 2024-11-15T09:38:26,842 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T09:38:26,842 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T09:38:26,842 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T09:38:26,842 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T09:38:26,842 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T09:38:26,843 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T09:38:26,843 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731663506842Disabling compacts and flushes for region at 
1731663506842Disabling writes for close at 1731663506842Writing region close event to WAL at 1731663506843 (+1 ms)Closed at 1731663506843 2024-11-15T09:38:26,844 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:38:26,844 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T09:38:26,844 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T09:38:26,845 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T09:38:26,846 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T09:38:26,863 INFO [RS:0;791f12959b23:37741 {}] regionserver.HRegionServer(746): ClusterId : dc934f2f-7761-4543-a102-3f25b17ee455 2024-11-15T09:38:26,863 DEBUG [RS:0;791f12959b23:37741 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T09:38:26,868 DEBUG [RS:0;791f12959b23:37741 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T09:38:26,868 DEBUG [RS:0;791f12959b23:37741 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T09:38:26,879 DEBUG [RS:0;791f12959b23:37741 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T09:38:26,879 DEBUG [RS:0;791f12959b23:37741 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3425efe7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=791f12959b23/172.17.0.2:0 2024-11-15T09:38:26,913 DEBUG [RS:0;791f12959b23:37741 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;791f12959b23:37741 2024-11-15T09:38:26,913 INFO [RS:0;791f12959b23:37741 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T09:38:26,913 INFO [RS:0;791f12959b23:37741 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T09:38:26,913 DEBUG [RS:0;791f12959b23:37741 {}] regionserver.HRegionServer(832): About to register with Master. 
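[Editorial sketch] Earlier in this master startup the HMaster logged "Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it." A minimal illustrative sketch of flipping that switch is below; it is not part of this test run, the key name is taken verbatim from the log, and the threshold of 3 is an arbitrary example value.

// Illustrative only: enabling the store-file ref-count recovery hinted at above.
// Any value > 0 enables it; 3 is just an example.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountRecoverySketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Equivalent to setting this property in hbase-site.xml on the master.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
    }
}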
2024-11-15T09:38:26,913 INFO [RS:0;791f12959b23:37741 {}] regionserver.HRegionServer(2659): reportForDuty to master=791f12959b23,35857,1731663506251 with port=37741, startcode=1731663506439 2024-11-15T09:38:26,914 DEBUG [RS:0;791f12959b23:37741 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T09:38:26,915 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35219, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T09:38:26,915 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35857 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 791f12959b23,37741,1731663506439 2024-11-15T09:38:26,916 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35857 {}] master.ServerManager(517): Registering regionserver=791f12959b23,37741,1731663506439 2024-11-15T09:38:26,917 DEBUG [RS:0;791f12959b23:37741 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657 2024-11-15T09:38:26,917 DEBUG [RS:0;791f12959b23:37741 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38379 2024-11-15T09:38:26,917 DEBUG [RS:0;791f12959b23:37741 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T09:38:26,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T09:38:26,930 DEBUG [RS:0;791f12959b23:37741 {}] zookeeper.ZKUtil(111): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/791f12959b23,37741,1731663506439 2024-11-15T09:38:26,930 WARN [RS:0;791f12959b23:37741 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T09:38:26,930 INFO [RS:0;791f12959b23:37741 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:38:26,930 DEBUG [RS:0;791f12959b23:37741 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/WALs/791f12959b23,37741,1731663506439 2024-11-15T09:38:26,930 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [791f12959b23,37741,1731663506439] 2024-11-15T09:38:26,933 INFO [RS:0;791f12959b23:37741 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T09:38:26,934 INFO [RS:0;791f12959b23:37741 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T09:38:26,935 INFO [RS:0;791f12959b23:37741 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T09:38:26,935 INFO [RS:0;791f12959b23:37741 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
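[Editorial sketch] The MemStoreFlusher line above reports globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M; the low mark is exactly 95% of the limit. Treating 0.95 as the stock lower-limit fraction is an assumption (the log only prints the two resolved sizes), but the arithmetic checks out:

// Arithmetic check of the MemStoreFlusher line above. The 0.95 factor is assumed to be the
// default global-memstore lower-limit fraction; the log itself only shows the resulting sizes.
public class MemStoreLimitSketch {
    public static void main(String[] args) {
        double globalLimitMb = 880.0;   // globalMemStoreLimit from the log
        double lowerLimitFactor = 0.95; // assumed default lower-limit fraction
        System.out.println(globalLimitMb * lowerLimitFactor); // 836.0, the logged low mark
    }
}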
2024-11-15T09:38:26,935 INFO [RS:0;791f12959b23:37741 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T09:38:26,936 INFO [RS:0;791f12959b23:37741 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T09:38:26,936 INFO [RS:0;791f12959b23:37741 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T09:38:26,936 DEBUG [RS:0;791f12959b23:37741 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:38:26,936 DEBUG [RS:0;791f12959b23:37741 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:38:26,936 DEBUG [RS:0;791f12959b23:37741 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:38:26,936 DEBUG [RS:0;791f12959b23:37741 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:38:26,936 DEBUG [RS:0;791f12959b23:37741 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:38:26,936 DEBUG [RS:0;791f12959b23:37741 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/791f12959b23:0, corePoolSize=2, maxPoolSize=2 2024-11-15T09:38:26,936 DEBUG [RS:0;791f12959b23:37741 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:38:26,936 DEBUG [RS:0;791f12959b23:37741 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:38:26,936 DEBUG [RS:0;791f12959b23:37741 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:38:26,936 DEBUG [RS:0;791f12959b23:37741 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:38:26,936 DEBUG [RS:0;791f12959b23:37741 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:38:26,936 DEBUG [RS:0;791f12959b23:37741 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:38:26,936 DEBUG [RS:0;791f12959b23:37741 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/791f12959b23:0, corePoolSize=3, maxPoolSize=3 2024-11-15T09:38:26,936 DEBUG [RS:0;791f12959b23:37741 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0, corePoolSize=3, maxPoolSize=3 2024-11-15T09:38:26,948 INFO [RS:0;791f12959b23:37741 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
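[Editorial sketch] The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." lines above and below come from HBase's ChoreService scheduling periodic tasks. The following is a hedged sketch of that pattern, assuming the public ScheduledChore/ChoreService API shape; DemoChore is an invented name used only for illustration and is not one of the chores in this log.

// Minimal sketch (assumed API shape, not code from this test) of how a chore gets scheduled.
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
    // Hypothetical chore used only for illustration.
    static class DemoChore extends ScheduledChore {
        DemoChore(Stoppable stopper) {
            super("DemoChore", stopper, 1000); // name + 1s period, like CompactionChecker above
        }
        @Override
        protected void chore() {
            System.out.println("periodic work runs here");
        }
    }

    public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {
            private volatile boolean stopped;
            @Override public void stop(String why) { stopped = true; }
            @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(new DemoChore(stopper)); // the service logs "... is enabled." when scheduled
        service.shutdown();
    }
}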
2024-11-15T09:38:26,948 INFO [RS:0;791f12959b23:37741 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T09:38:26,948 INFO [RS:0;791f12959b23:37741 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:38:26,948 INFO [RS:0;791f12959b23:37741 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T09:38:26,948 INFO [RS:0;791f12959b23:37741 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T09:38:26,948 INFO [RS:0;791f12959b23:37741 {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,37741,1731663506439-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T09:38:26,964 INFO [RS:0;791f12959b23:37741 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T09:38:26,964 INFO [RS:0;791f12959b23:37741 {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,37741,1731663506439-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:38:26,964 INFO [RS:0;791f12959b23:37741 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:38:26,965 INFO [RS:0;791f12959b23:37741 {}] regionserver.Replication(171): 791f12959b23,37741,1731663506439 started 2024-11-15T09:38:26,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:26,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:26,979 INFO [RS:0;791f12959b23:37741 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T09:38:26,980 INFO [RS:0;791f12959b23:37741 {}] regionserver.HRegionServer(1482): Serving as 791f12959b23,37741,1731663506439, RpcServer on 791f12959b23/172.17.0.2:37741, sessionid=0x1013ddc26e60001 2024-11-15T09:38:26,980 DEBUG [RS:0;791f12959b23:37741 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T09:38:26,980 DEBUG [RS:0;791f12959b23:37741 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 791f12959b23,37741,1731663506439 2024-11-15T09:38:26,980 DEBUG [RS:0;791f12959b23:37741 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '791f12959b23,37741,1731663506439' 2024-11-15T09:38:26,980 DEBUG [RS:0;791f12959b23:37741 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T09:38:26,980 DEBUG [RS:0;791f12959b23:37741 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T09:38:26,981 DEBUG [RS:0;791f12959b23:37741 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T09:38:26,981 DEBUG [RS:0;791f12959b23:37741 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T09:38:26,981 DEBUG [RS:0;791f12959b23:37741 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 791f12959b23,37741,1731663506439 2024-11-15T09:38:26,981 DEBUG [RS:0;791f12959b23:37741 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '791f12959b23,37741,1731663506439' 2024-11-15T09:38:26,981 DEBUG [RS:0;791f12959b23:37741 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T09:38:26,981 DEBUG [RS:0;791f12959b23:37741 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T09:38:26,981 DEBUG [RS:0;791f12959b23:37741 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T09:38:26,981 INFO [RS:0;791f12959b23:37741 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T09:38:26,981 INFO [RS:0;791f12959b23:37741 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T09:38:26,997 WARN [791f12959b23:35857 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-15T09:38:27,083 INFO [RS:0;791f12959b23:37741 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C37741%2C1731663506439, suffix=, logDir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/WALs/791f12959b23,37741,1731663506439, archiveDir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/oldWALs, maxLogs=32 2024-11-15T09:38:27,084 INFO [RS:0;791f12959b23:37741 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C37741%2C1731663506439.1731663507084 2024-11-15T09:38:27,097 INFO [RS:0;791f12959b23:37741 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/WALs/791f12959b23,37741,1731663506439/791f12959b23%2C37741%2C1731663506439.1731663507084 2024-11-15T09:38:27,100 DEBUG [RS:0;791f12959b23:37741 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43221:43221),(127.0.0.1/127.0.0.1:36185:36185)] 2024-11-15T09:38:27,247 DEBUG [791f12959b23:35857 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T09:38:27,247 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=791f12959b23,37741,1731663506439 2024-11-15T09:38:27,249 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 791f12959b23,37741,1731663506439, state=OPENING 2024-11-15T09:38:27,309 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T09:38:27,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:38:27,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:38:27,320 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T09:38:27,321 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:38:27,321 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:38:27,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=791f12959b23,37741,1731663506439}] 2024-11-15T09:38:27,474 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T09:38:27,476 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56805, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T09:38:27,479 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T09:38:27,479 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:38:27,481 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C37741%2C1731663506439.meta, suffix=.meta, logDir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/WALs/791f12959b23,37741,1731663506439, archiveDir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/oldWALs, maxLogs=32 2024-11-15T09:38:27,481 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C37741%2C1731663506439.meta.1731663507481.meta 2024-11-15T09:38:27,493 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/WALs/791f12959b23,37741,1731663506439/791f12959b23%2C37741%2C1731663506439.meta.1731663507481.meta 2024-11-15T09:38:27,494 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36185:36185),(127.0.0.1/127.0.0.1:43221:43221)] 2024-11-15T09:38:27,494 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T09:38:27,495 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T09:38:27,495 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T09:38:27,495 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
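[Editorial sketch] The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" lines above show a roll size of exactly half the block size, which is consistent with a 0.5 roll multiplier. The property names in the comments below are the usual HBase keys and are assumptions here; the log prints only the resolved values.

// Back-of-the-envelope check of the WAL configuration lines above.
public class WalRollSizeSketch {
    public static void main(String[] args) {
        long blockSizeBytes = 256L * 1024 * 1024; // logged WAL blocksize
        float rollMultiplier = 0.5f;              // assumed hbase.regionserver.logroll.multiplier default
        long rollSize = (long) (blockSizeBytes * rollMultiplier);
        System.out.println(rollSize / (1024 * 1024) + " MB"); // 128 MB, matching the log
        int maxLogs = 32;                         // hbase.regionserver.maxlogs, as logged
        System.out.println(maxLogs);
    }
}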
2024-11-15T09:38:27,495 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T09:38:27,495 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:38:27,495 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T09:38:27,495 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T09:38:27,497 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T09:38:27,498 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T09:38:27,498 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:27,498 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:38:27,498 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T09:38:27,499 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T09:38:27,499 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:27,500 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:38:27,500 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T09:38:27,500 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T09:38:27,500 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:27,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:38:27,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T09:38:27,502 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T09:38:27,502 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:27,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
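[Editorial sketch] The CompactionConfiguration dumps above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0) resolve to the usual compaction defaults. A hedged sketch of the keys those values typically come from follows; the key names are assumptions, since the log only prints the resolved configuration.

// Sketch of where the values in the CompactionConfiguration lines above usually come from.
// The defaults shown match the resolved values in the log (3 / 10 files, ratio 1.2, off-peak 5.0).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
        int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
        float ratio = conf.getFloat("hbase.hstore.compaction.ratio", 1.2f);
        float offPeakRatio = conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        System.out.printf("files [%d, %d), ratio %.1f, off-peak %.1f%n",
                minFiles, maxFiles, ratio, offPeakRatio);
    }
}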
2024-11-15T09:38:27,502 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T09:38:27,503 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740 2024-11-15T09:38:27,504 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740 2024-11-15T09:38:27,505 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T09:38:27,505 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T09:38:27,505 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T09:38:27,507 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T09:38:27,507 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=860553, jitterRate=0.09425024688243866}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T09:38:27,507 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T09:38:27,508 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731663507495Writing region info on filesystem at 1731663507495Initializing all the Stores at 1731663507496 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663507496Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663507496Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663507496Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663507497 (+1 ms)Cleaning up temporary data from old regions at 1731663507505 (+8 ms)Running coprocessor post-open hooks at 1731663507507 (+2 ms)Region opened successfully at 1731663507508 (+1 ms) 2024-11-15T09:38:27,509 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731663507474 2024-11-15T09:38:27,511 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T09:38:27,511 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T09:38:27,512 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=791f12959b23,37741,1731663506439 2024-11-15T09:38:27,513 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 791f12959b23,37741,1731663506439, state=OPEN 2024-11-15T09:38:27,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T09:38:27,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T09:38:27,555 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=791f12959b23,37741,1731663506439 2024-11-15T09:38:27,555 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:38:27,555 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:38:27,559 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T09:38:27,559 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=791f12959b23,37741,1731663506439 in 234 msec 2024-11-15T09:38:27,562 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T09:38:27,562 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 715 msec 2024-11-15T09:38:27,563 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:38:27,563 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T09:38:27,565 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T09:38:27,565 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=791f12959b23,37741,1731663506439, seqNum=-1] 2024-11-15T09:38:27,565 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T09:38:27,566 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39465, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T09:38:27,572 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 773 msec 2024-11-15T09:38:27,572 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731663507572, completionTime=-1 2024-11-15T09:38:27,572 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T09:38:27,572 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T09:38:27,574 INFO [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T09:38:27,574 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731663567574 2024-11-15T09:38:27,574 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731663627574 2024-11-15T09:38:27,574 INFO [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-15T09:38:27,574 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,35857,1731663506251-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:38:27,574 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,35857,1731663506251-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:38:27,574 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,35857,1731663506251-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:38:27,574 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-791f12959b23:35857, period=300000, unit=MILLISECONDS is enabled. 
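[Editorial sketch] Two of the numbers in the "Opened 1588230740" line a few entries above can be back-solved from values appearing elsewhere in this log: desiredMaxFileSize=860553 is 786432 (the hbase.hregion.max.filesize that the TableDescriptorChecker warning further down complains about) scaled by the logged jitterRate, and flushSizeLowerBound=16777216 matches the 16.0 M per-family fallback in the FlushLargeStoresPolicy lines. The 64 MB region flush size below is inferred from 16 MB x 4 families, not stated in the log.

// Arithmetic check of two values in the "Opened 1588230740" line above.
public class MetaRegionNumbersSketch {
    public static void main(String[] args) {
        // desiredMaxFileSize is approximately maxFileSize * (1 + jitterRate).
        long maxFileSize = 786432L;
        double jitterRate = 0.09425024688243866;
        System.out.println(Math.round(maxFileSize * (1 + jitterRate))); // ~860553, as logged

        // flushSizeLowerBound falls back to regionFlushSize / familyCount when
        // hbase.hregion.percolumnfamilyflush.size.lower.bound is unset (see the
        // FlushLargeStoresPolicy debug lines above). 64 MB is inferred, not logged.
        long regionFlushSize = 64L * 1024 * 1024;
        int families = 4; // info, ns, rep_barrier, table
        System.out.println(regionFlushSize / families); // 16777216 = 16.0 M
    }
}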
2024-11-15T09:38:27,574 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T09:38:27,574 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T09:38:27,576 DEBUG [master/791f12959b23:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T09:38:27,578 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.068sec 2024-11-15T09:38:27,579 INFO [master/791f12959b23:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T09:38:27,579 INFO [master/791f12959b23:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T09:38:27,579 INFO [master/791f12959b23:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T09:38:27,579 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T09:38:27,579 INFO [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T09:38:27,579 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,35857,1731663506251-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T09:38:27,579 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,35857,1731663506251-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T09:38:27,581 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T09:38:27,581 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T09:38:27,581 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,35857,1731663506251-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T09:38:27,664 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11088abd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:38:27,664 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 791f12959b23,35857,-1 for getting cluster id 2024-11-15T09:38:27,664 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T09:38:27,665 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dc934f2f-7761-4543-a102-3f25b17ee455' 2024-11-15T09:38:27,665 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T09:38:27,665 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dc934f2f-7761-4543-a102-3f25b17ee455" 2024-11-15T09:38:27,666 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@317c372f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:38:27,666 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [791f12959b23,35857,-1] 2024-11-15T09:38:27,666 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T09:38:27,666 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:38:27,667 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44990, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T09:38:27,668 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ea110c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:38:27,669 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T09:38:27,670 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=791f12959b23,37741,1731663506439, seqNum=-1] 2024-11-15T09:38:27,670 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T09:38:27,671 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52516, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T09:38:27,673 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=791f12959b23,35857,1731663506251 2024-11-15T09:38:27,673 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:38:27,675 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T09:38:27,675 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T09:38:27,676 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 791f12959b23,35857,1731663506251 2024-11-15T09:38:27,676 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6105880b 2024-11-15T09:38:27,676 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T09:38:27,678 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44996, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T09:38:27,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35857 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T09:38:27,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35857 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-15T09:38:27,678 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35857 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T09:38:27,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35857 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-15T09:38:27,681 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T09:38:27,681 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:27,681 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35857 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-15T09:38:27,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35857 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T09:38:27,682 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T09:38:27,688 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741835_1011 (size=381) 2024-11-15T09:38:27,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741835_1011 (size=381) 2024-11-15T09:38:27,690 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => aa2be1855009106bab2804d761e03478, NAME => 'TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657 2024-11-15T09:38:27,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741836_1012 (size=64) 2024-11-15T09:38:27,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741836_1012 (size=64) 2024-11-15T09:38:27,697 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:38:27,697 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing aa2be1855009106bab2804d761e03478, disabling compactions & flushes 2024-11-15T09:38:27,697 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. 2024-11-15T09:38:27,697 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. 2024-11-15T09:38:27,697 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. after waiting 0 ms 2024-11-15T09:38:27,697 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. 2024-11-15T09:38:27,697 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. 
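The create request and the two TableDescriptorChecker warnings above describe a table with a single 'info' family and deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) values, which this log-rolling test uses to force frequent flushes and splits. Below is a hedged sketch of what an equivalent client-side creation looks like with the public HBase API; the values are taken from the log lines, but the class name and connection setup are illustrative and this is not the test's actual code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestLogRollingTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
          // the two values flagged by TableDescriptorChecker above: tiny on purpose
          .setMaxFileSize(786432L)       // per-table override of hbase.hregion.max.filesize
          .setMemStoreFlushSize(8192L)   // per-table override of hbase.hregion.memstore.flush.size
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .setBlocksize(64 * 1024)
              .build())
          .build();
      admin.createTable(td); // surfaces on the master as the CreateTableProcedure (pid=4) above
    }
  }
}
```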
2024-11-15T09:38:27,697 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for aa2be1855009106bab2804d761e03478: Waiting for close lock at 1731663507697Disabling compacts and flushes for region at 1731663507697Disabling writes for close at 1731663507697Writing region close event to WAL at 1731663507697Closed at 1731663507697 2024-11-15T09:38:27,698 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T09:38:27,699 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731663507698"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731663507698"}]},"ts":"1731663507698"} 2024-11-15T09:38:27,701 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-15T09:38:27,702 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T09:38:27,702 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731663507702"}]},"ts":"1731663507702"} 2024-11-15T09:38:27,704 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-15T09:38:27,705 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aa2be1855009106bab2804d761e03478, ASSIGN}] 2024-11-15T09:38:27,706 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aa2be1855009106bab2804d761e03478, ASSIGN 2024-11-15T09:38:27,707 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aa2be1855009106bab2804d761e03478, ASSIGN; state=OFFLINE, location=791f12959b23,37741,1731663506439; forceNewPlan=false, retain=false 2024-11-15T09:38:27,857 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=aa2be1855009106bab2804d761e03478, regionState=OPENING, regionLocation=791f12959b23,37741,1731663506439 2024-11-15T09:38:27,860 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aa2be1855009106bab2804d761e03478, ASSIGN because future has completed 2024-11-15T09:38:27,860 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure aa2be1855009106bab2804d761e03478, 
server=791f12959b23,37741,1731663506439}] 2024-11-15T09:38:27,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:27,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:28,016 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. 2024-11-15T09:38:28,017 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => aa2be1855009106bab2804d761e03478, NAME => 'TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478.', STARTKEY => '', ENDKEY => ''} 2024-11-15T09:38:28,017 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling aa2be1855009106bab2804d761e03478 2024-11-15T09:38:28,017 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:38:28,017 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for aa2be1855009106bab2804d761e03478 2024-11-15T09:38:28,017 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for aa2be1855009106bab2804d761e03478 2024-11-15T09:38:28,018 INFO [StoreOpener-aa2be1855009106bab2804d761e03478-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region aa2be1855009106bab2804d761e03478 2024-11-15T09:38:28,020 INFO [StoreOpener-aa2be1855009106bab2804d761e03478-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for 
tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region aa2be1855009106bab2804d761e03478 columnFamilyName info 2024-11-15T09:38:28,020 DEBUG [StoreOpener-aa2be1855009106bab2804d761e03478-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:28,021 INFO [StoreOpener-aa2be1855009106bab2804d761e03478-1 {}] regionserver.HStore(327): Store=aa2be1855009106bab2804d761e03478/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:38:28,021 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for aa2be1855009106bab2804d761e03478 2024-11-15T09:38:28,022 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478 2024-11-15T09:38:28,022 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478 2024-11-15T09:38:28,022 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for aa2be1855009106bab2804d761e03478 2024-11-15T09:38:28,022 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for aa2be1855009106bab2804d761e03478 2024-11-15T09:38:28,024 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for aa2be1855009106bab2804d761e03478 2024-11-15T09:38:28,026 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T09:38:28,026 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened aa2be1855009106bab2804d761e03478; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=834885, jitterRate=0.06161203980445862}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T09:38:28,027 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for aa2be1855009106bab2804d761e03478 2024-11-15T09:38:28,027 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for aa2be1855009106bab2804d761e03478: Running coprocessor pre-open hook at 1731663508017Writing 
region info on filesystem at 1731663508017Initializing all the Stores at 1731663508018 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663508018Cleaning up temporary data from old regions at 1731663508022 (+4 ms)Running coprocessor post-open hooks at 1731663508027 (+5 ms)Region opened successfully at 1731663508027 2024-11-15T09:38:28,028 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478., pid=6, masterSystemTime=1731663508013 2024-11-15T09:38:28,031 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. 2024-11-15T09:38:28,031 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. 2024-11-15T09:38:28,032 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=aa2be1855009106bab2804d761e03478, regionState=OPEN, openSeqNum=2, regionLocation=791f12959b23,37741,1731663506439 2024-11-15T09:38:28,034 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure aa2be1855009106bab2804d761e03478, server=791f12959b23,37741,1731663506439 because future has completed 2024-11-15T09:38:28,040 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T09:38:28,040 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure aa2be1855009106bab2804d761e03478, server=791f12959b23,37741,1731663506439 in 177 msec 2024-11-15T09:38:28,043 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T09:38:28,043 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aa2be1855009106bab2804d761e03478, ASSIGN in 336 msec 2024-11-15T09:38:28,044 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T09:38:28,045 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731663508045"}]},"ts":"1731663508045"} 2024-11-15T09:38:28,047 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-15T09:38:28,048 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION
2024-11-15T09:38:28,051 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 369 msec
2024-11-15T09:38:28,229 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-15T09:38:28,764 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
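The MetricsConfig warning just above means the hadoop-metrics2 subsystem found neither hadoop-metrics2-datanode.properties nor hadoop-metrics2.properties on the test classpath, so it falls back to defaults; in this minicluster run the warning is harmless. For reference, a minimal hadoop-metrics2.properties that would satisfy that lookup might look like the following; the sink name and output file are arbitrary examples, not something this test configures.

```properties
# Minimal hadoop-metrics2.properties (illustrative only)
*.period=10
datanode.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
datanode.sink.file.filename=datanode-metrics.out
```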
2024-11-15T09:38:30,227 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling
2024-11-15T09:38:30,227 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer
2024-11-15T09:38:30,228 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
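The Close-WAL-Writer-0 "Failed invocation" warnings earlier in this log come from RecoverLeaseFSUtils reflectively calling DistributedFileSystem.isFileClosed while trying to recover the lease on an old WAL; the call keeps failing with "Filesystem closed" because that DFSClient has apparently already been shut down, so the helper simply retries about once a second. Below is a simplified, non-reflective sketch of that recover-then-poll pattern against the public HDFS client API; the path argument, class name, and timings are placeholders, and this is not the HBase helper's actual code.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RecoverWalLeaseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // e.g. an hdfs:// path to an old WAL under .../WALs/<server>/...
    Path wal = new Path(args[0]);
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(URI.create(args[0]), conf);

    // Ask the NameNode to start lease recovery; true means it already completed.
    boolean recovered = dfs.recoverLease(wal);
    while (!recovered) {
      Thread.sleep(1000L);               // the log shows roughly one attempt per second
      recovered = dfs.isFileClosed(wal); // throws IOException if the client is closed,
                                         // the "Filesystem closed" cause seen in the traces
    }
    System.out.println("Lease recovered for " + wal);
  }
}
```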
2024-11-15T09:38:32,933 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-15T09:38:32,934 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling'
2024-11-15T09:38:35,731 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-15T09:38:35,732 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:35,732 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:35,732 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:35,733 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:35,733 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:35,733 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:35,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:35,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:35,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:35,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:35,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:35,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:35,764 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:35,764 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:35,764 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:35,767 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:35,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:35,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:36,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:36,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:37,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35857 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T09:38:37,788 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-15T09:38:37,788 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-15T09:38:37,790 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-15T09:38:37,790 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. 
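Editor's note on the recurring "Failed invocation" warnings above and below: the Close-WAL-Writer thread keeps polling DistributedFileSystem.isFileClosed through reflection roughly once per second (see the one-second spacing of the timestamps), and every poll fails because the mini-cluster's DFSClient has already been shut down, so the reflective call surfaces an InvocationTargetException wrapping "java.io.IOException: Filesystem closed". The snippet below is a minimal illustrative sketch of that polling pattern only; it is not the RecoverLeaseFSUtils source, and the class name LeaseRecoverySketch, the method waitUntilFileClosed, and the fixed one-second retry are assumptions made for the example.

// Illustrative sketch only -- not the actual RecoverLeaseFSUtils code.
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class LeaseRecoverySketch {

  /** Polls isFileClosed(path) via reflection, retrying about once per second. */
  static boolean waitUntilFileClosed(FileSystem fs, Path wal, long timeoutMs)
      throws InterruptedException {
    Method isFileClosed;
    try {
      // The method is looked up reflectively so this also compiles against
      // FileSystem implementations that do not expose isFileClosed.
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // not an HDFS client, nothing to poll
    }
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if ((Boolean) isFileClosed.invoke(fs, wal)) {
          return true;
        }
      } catch (InvocationTargetException e) {
        // When the wrapped cause is "Filesystem closed", the DFSClient behind fs is
        // already shut down and every further poll fails the same way, which is what
        // the repeated RecoverLeaseFSUtils(258) warnings in this log show.
        System.err.println("Failed invocation for " + wal + ": " + e.getCause());
      } catch (IllegalAccessException e) {
        return false;
      }
      Thread.sleep(1000L);
    }
    return false;
  }
}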
2024-11-15T09:38:37,793 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478., hostname=791f12959b23,37741,1731663506439, seqNum=2] 2024-11-15T09:38:37,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on aa2be1855009106bab2804d761e03478 2024-11-15T09:38:37,807 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aa2be1855009106bab2804d761e03478 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T09:38:37,823 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/231a7a4c70454940a851d0adad80941c is 1080, key is row0001/info:/1731663517794/Put/seqid=0 2024-11-15T09:38:37,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741837_1013 (size=12509) 2024-11-15T09:38:37,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741837_1013 (size=12509) 2024-11-15T09:38:37,832 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/231a7a4c70454940a851d0adad80941c 2024-11-15T09:38:37,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/231a7a4c70454940a851d0adad80941c as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/231a7a4c70454940a851d0adad80941c 2024-11-15T09:38:37,846 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/231a7a4c70454940a851d0adad80941c, entries=7, sequenceid=11, filesize=12.2 K 2024-11-15T09:38:37,847 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for aa2be1855009106bab2804d761e03478 in 41ms, sequenceid=11, compaction requested=false 2024-11-15T09:38:37,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aa2be1855009106bab2804d761e03478: 2024-11-15T09:38:37,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on aa2be1855009106bab2804d761e03478 2024-11-15T09:38:37,849 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aa2be1855009106bab2804d761e03478 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-15T09:38:37,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/e3c3025233464cd5ab2dbe7d0fa984a2 is 1080, key is row0008/info:/1731663517808/Put/seqid=0 2024-11-15T09:38:37,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741838_1014 (size=23299) 2024-11-15T09:38:37,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741838_1014 (size=23299) 2024-11-15T09:38:37,860 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/e3c3025233464cd5ab2dbe7d0fa984a2 2024-11-15T09:38:37,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/e3c3025233464cd5ab2dbe7d0fa984a2 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/e3c3025233464cd5ab2dbe7d0fa984a2 2024-11-15T09:38:37,874 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/e3c3025233464cd5ab2dbe7d0fa984a2, entries=17, sequenceid=31, filesize=22.8 K 2024-11-15T09:38:37,876 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=8.41 KB/8608 for aa2be1855009106bab2804d761e03478 in 26ms, sequenceid=31, compaction requested=false 2024-11-15T09:38:37,876 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aa2be1855009106bab2804d761e03478: 2024-11-15T09:38:37,876 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=35.0 K, sizeToCheck=16.0 K 2024-11-15T09:38:37,876 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:38:37,876 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/e3c3025233464cd5ab2dbe7d0fa984a2 because midkey is the same as first or last row 2024-11-15T09:38:37,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:37,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:38,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:38,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:39,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on aa2be1855009106bab2804d761e03478 2024-11-15T09:38:39,871 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aa2be1855009106bab2804d761e03478 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-15T09:38:39,878 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/efad2046c1b14d98a1efc0f307969c71 is 1080, key is row0025/info:/1731663517850/Put/seqid=0 2024-11-15T09:38:39,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741839_1015 (size=14663) 2024-11-15T09:38:39,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741839_1015 (size=14663) 2024-11-15T09:38:39,885 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/efad2046c1b14d98a1efc0f307969c71 2024-11-15T09:38:39,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/efad2046c1b14d98a1efc0f307969c71 as 
hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/efad2046c1b14d98a1efc0f307969c71 2024-11-15T09:38:39,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/efad2046c1b14d98a1efc0f307969c71, entries=9, sequenceid=43, filesize=14.3 K 2024-11-15T09:38:39,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=10.51 KB/10760 for aa2be1855009106bab2804d761e03478 in 26ms, sequenceid=43, compaction requested=true 2024-11-15T09:38:39,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aa2be1855009106bab2804d761e03478: 2024-11-15T09:38:39,898 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=49.3 K, sizeToCheck=16.0 K 2024-11-15T09:38:39,898 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:38:39,898 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/e3c3025233464cd5ab2dbe7d0fa984a2 because midkey is the same as first or last row 2024-11-15T09:38:39,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store aa2be1855009106bab2804d761e03478:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T09:38:39,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:38:39,898 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T09:38:39,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on aa2be1855009106bab2804d761e03478 2024-11-15T09:38:39,899 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aa2be1855009106bab2804d761e03478 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-15T09:38:39,900 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 50471 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T09:38:39,900 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1541): aa2be1855009106bab2804d761e03478/info is initiating minor compaction (all files) 2024-11-15T09:38:39,900 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of aa2be1855009106bab2804d761e03478/info in TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. 
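Editor's note on the split-policy lines that surround each flush and compaction here: the region server first logs that the summed store size exceeds the threshold ("Should split because region size is big enough sumSize=..., sizeToCheck=16.0 K") and then declines to split because the candidate file's midkey equals its first or last row. The sketch below is a loose, self-contained approximation of that two-step decision for readers following the log; it is not the HBase ConstantSizeRegionSplitPolicy or StoreUtils implementation, and every identifier in it (SplitCheckSketch, StoreFileInfo, chooseSplitPoint) is invented for illustration.

// Illustrative sketch only -- an approximation of the split checks reported in this log.
import java.util.Arrays;
import java.util.List;
import java.util.Optional;

public final class SplitCheckSketch {

  /** A store file reduced to the fields the surrounding log lines mention. */
  record StoreFileInfo(long sizeBytes, byte[] firstKey, byte[] midKey, byte[] lastKey) {}

  /**
   * Step 1: compare the summed store size against the threshold ("sumSize" vs "sizeToCheck").
   * Step 2: refuse to split when the largest file's midkey equals its first or last row,
   * mirroring "cannot split ... because midkey is the same as first or last row".
   */
  static Optional<byte[]> chooseSplitPoint(List<StoreFileInfo> files, long sizeToCheckBytes) {
    long sumSize = files.stream().mapToLong(StoreFileInfo::sizeBytes).sum();
    if (sumSize <= sizeToCheckBytes) {
      return Optional.empty(); // region is still small enough, no split requested
    }
    StoreFileInfo largest =
        files.stream().max((a, b) -> Long.compare(a.sizeBytes(), b.sizeBytes())).orElseThrow();
    if (Arrays.equals(largest.midKey(), largest.firstKey())
        || Arrays.equals(largest.midKey(), largest.lastKey())) {
      return Optional.empty(); // a single dominant row: no usable split point
    }
    return Optional.of(largest.midKey());
  }
}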
2024-11-15T09:38:39,900 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/231a7a4c70454940a851d0adad80941c, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/e3c3025233464cd5ab2dbe7d0fa984a2, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/efad2046c1b14d98a1efc0f307969c71] into tmpdir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp, totalSize=49.3 K 2024-11-15T09:38:39,901 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting 231a7a4c70454940a851d0adad80941c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731663517794 2024-11-15T09:38:39,901 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting e3c3025233464cd5ab2dbe7d0fa984a2, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731663517808 2024-11-15T09:38:39,901 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting efad2046c1b14d98a1efc0f307969c71, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731663517850 2024-11-15T09:38:39,903 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/7ad117434d5447919de3805bd028d0f8 is 1080, key is row0034/info:/1731663519873/Put/seqid=0 2024-11-15T09:38:39,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741840_1016 (size=16817) 2024-11-15T09:38:39,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741840_1016 (size=16817) 2024-11-15T09:38:39,914 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=57 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/7ad117434d5447919de3805bd028d0f8 2024-11-15T09:38:39,919 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): aa2be1855009106bab2804d761e03478#info#compaction#59 average throughput is 16.93 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T09:38:39,920 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/aa43a162acca48879db6193831475152 is 1080, key is row0001/info:/1731663517794/Put/seqid=0 2024-11-15T09:38:39,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/7ad117434d5447919de3805bd028d0f8 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/7ad117434d5447919de3805bd028d0f8 2024-11-15T09:38:39,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741841_1017 (size=40670) 2024-11-15T09:38:39,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741841_1017 (size=40670) 2024-11-15T09:38:39,926 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/7ad117434d5447919de3805bd028d0f8, entries=11, sequenceid=57, filesize=16.4 K 2024-11-15T09:38:39,927 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=13.66 KB/13988 for aa2be1855009106bab2804d761e03478 in 28ms, sequenceid=57, compaction requested=false 2024-11-15T09:38:39,927 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aa2be1855009106bab2804d761e03478: 2024-11-15T09:38:39,927 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.7 K, sizeToCheck=16.0 K 2024-11-15T09:38:39,927 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:38:39,927 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/e3c3025233464cd5ab2dbe7d0fa984a2 because midkey is the same as first or last row 2024-11-15T09:38:39,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on aa2be1855009106bab2804d761e03478 2024-11-15T09:38:39,928 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aa2be1855009106bab2804d761e03478 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-15T09:38:39,932 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/000cc5a82a21463ba81d66004fe0e6bf is 1080, key is row0045/info:/1731663519900/Put/seqid=0 2024-11-15T09:38:39,932 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/aa43a162acca48879db6193831475152 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/aa43a162acca48879db6193831475152 2024-11-15T09:38:39,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741842_1018 (size=20064) 2024-11-15T09:38:39,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741842_1018 (size=20064) 2024-11-15T09:38:39,939 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/000cc5a82a21463ba81d66004fe0e6bf 2024-11-15T09:38:39,940 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in aa2be1855009106bab2804d761e03478/info of aa2be1855009106bab2804d761e03478 into aa43a162acca48879db6193831475152(size=39.7 K), total size for store is 56.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T09:38:39,940 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for aa2be1855009106bab2804d761e03478: 2024-11-15T09:38:39,940 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478., storeName=aa2be1855009106bab2804d761e03478/info, priority=13, startTime=1731663519898; duration=0sec 2024-11-15T09:38:39,940 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K 2024-11-15T09:38:39,940 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:38:39,940 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/aa43a162acca48879db6193831475152 because midkey is the same as first or last row 2024-11-15T09:38:39,940 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K 2024-11-15T09:38:39,940 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:38:39,940 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/aa43a162acca48879db6193831475152 because midkey is the same as first or last row 2024-11-15T09:38:39,940 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 
{}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K 2024-11-15T09:38:39,940 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:38:39,940 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/aa43a162acca48879db6193831475152 because midkey is the same as first or last row 2024-11-15T09:38:39,940 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:38:39,940 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: aa2be1855009106bab2804d761e03478:info 2024-11-15T09:38:39,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/000cc5a82a21463ba81d66004fe0e6bf as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/000cc5a82a21463ba81d66004fe0e6bf 2024-11-15T09:38:39,949 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/000cc5a82a21463ba81d66004fe0e6bf, entries=14, sequenceid=74, filesize=19.6 K 2024-11-15T09:38:39,950 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=6.30 KB/6456 for aa2be1855009106bab2804d761e03478 in 22ms, sequenceid=74, compaction requested=true 2024-11-15T09:38:39,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aa2be1855009106bab2804d761e03478: 2024-11-15T09:38:39,950 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=75.7 K, sizeToCheck=16.0 K 2024-11-15T09:38:39,950 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:38:39,950 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/aa43a162acca48879db6193831475152 because midkey is the same as first or last row 2024-11-15T09:38:39,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store aa2be1855009106bab2804d761e03478:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T09:38:39,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:38:39,950 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 
3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T09:38:39,951 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 77551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T09:38:39,951 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1541): aa2be1855009106bab2804d761e03478/info is initiating minor compaction (all files) 2024-11-15T09:38:39,951 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of aa2be1855009106bab2804d761e03478/info in TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. 2024-11-15T09:38:39,952 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/aa43a162acca48879db6193831475152, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/7ad117434d5447919de3805bd028d0f8, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/000cc5a82a21463ba81d66004fe0e6bf] into tmpdir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp, totalSize=75.7 K 2024-11-15T09:38:39,952 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting aa43a162acca48879db6193831475152, keycount=33, bloomtype=ROW, size=39.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731663517794 2024-11-15T09:38:39,952 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7ad117434d5447919de3805bd028d0f8, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1731663519873 2024-11-15T09:38:39,952 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting 000cc5a82a21463ba81d66004fe0e6bf, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1731663519900 2024-11-15T09:38:39,961 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): aa2be1855009106bab2804d761e03478#info#compaction#61 average throughput is 29.76 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T09:38:39,961 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/a05fffb0a8844d969bf620c467912a14 is 1080, key is row0001/info:/1731663517794/Put/seqid=0 2024-11-15T09:38:39,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741843_1019 (size=67766) 2024-11-15T09:38:39,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741843_1019 (size=67766) 2024-11-15T09:38:39,972 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/a05fffb0a8844d969bf620c467912a14 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/a05fffb0a8844d969bf620c467912a14 2024-11-15T09:38:39,978 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in aa2be1855009106bab2804d761e03478/info of aa2be1855009106bab2804d761e03478 into a05fffb0a8844d969bf620c467912a14(size=66.2 K), total size for store is 66.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T09:38:39,979 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for aa2be1855009106bab2804d761e03478: 2024-11-15T09:38:39,979 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478., storeName=aa2be1855009106bab2804d761e03478/info, priority=13, startTime=1731663519950; duration=0sec 2024-11-15T09:38:39,979 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.2 K, sizeToCheck=16.0 K 2024-11-15T09:38:39,979 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:38:39,979 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/a05fffb0a8844d969bf620c467912a14 because midkey is the same as first or last row 2024-11-15T09:38:39,979 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.2 K, sizeToCheck=16.0 K 2024-11-15T09:38:39,979 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:38:39,979 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/a05fffb0a8844d969bf620c467912a14 because midkey is the same as first or last row 2024-11-15T09:38:39,979 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.2 K, sizeToCheck=16.0 K 2024-11-15T09:38:39,979 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:38:39,979 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/a05fffb0a8844d969bf620c467912a14 because midkey is the same as first or last row 2024-11-15T09:38:39,979 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:38:39,979 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: aa2be1855009106bab2804d761e03478:info 2024-11-15T09:38:39,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:39,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:40,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:40,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:41,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on aa2be1855009106bab2804d761e03478 2024-11-15T09:38:41,947 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aa2be1855009106bab2804d761e03478 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T09:38:41,953 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/98937cbb82ac4b7084e1596e8b1f7a78 is 1080, key is row0059/info:/1731663519930/Put/seqid=0 2024-11-15T09:38:41,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741844_1020 (size=12509) 2024-11-15T09:38:41,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741844_1020 (size=12509) 2024-11-15T09:38:41,959 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/98937cbb82ac4b7084e1596e8b1f7a78 2024-11-15T09:38:41,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/98937cbb82ac4b7084e1596e8b1f7a78 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/98937cbb82ac4b7084e1596e8b1f7a78 2024-11-15T09:38:41,971 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/98937cbb82ac4b7084e1596e8b1f7a78, entries=7, sequenceid=86, filesize=12.2 K 2024-11-15T09:38:41,972 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for aa2be1855009106bab2804d761e03478 in 25ms, sequenceid=86, compaction requested=false 2024-11-15T09:38:41,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aa2be1855009106bab2804d761e03478: 2024-11-15T09:38:41,972 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=78.4 K, sizeToCheck=16.0 K 2024-11-15T09:38:41,972 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:38:41,972 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/a05fffb0a8844d969bf620c467912a14 because midkey is the same as first or last row 2024-11-15T09:38:41,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on aa2be1855009106bab2804d761e03478 2024-11-15T09:38:41,973 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aa2be1855009106bab2804d761e03478 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-15T09:38:41,977 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/de8677c0e4b0461586bd019fafd763c0 is 1080, key is row0066/info:/1731663521948/Put/seqid=0 2024-11-15T09:38:41,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741845_1021 (size=16817) 2024-11-15T09:38:41,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741845_1021 (size=16817) 2024-11-15T09:38:41,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/de8677c0e4b0461586bd019fafd763c0 2024-11-15T09:38:41,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/de8677c0e4b0461586bd019fafd763c0 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/de8677c0e4b0461586bd019fafd763c0 2024-11-15T09:38:41,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:41,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:38:41,995 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/de8677c0e4b0461586bd019fafd763c0, entries=11, sequenceid=100, filesize=16.4 K 2024-11-15T09:38:41,996 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for aa2be1855009106bab2804d761e03478 in 23ms, sequenceid=100, compaction requested=true 2024-11-15T09:38:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aa2be1855009106bab2804d761e03478: 2024-11-15T09:38:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.8 K, sizeToCheck=16.0 K 2024-11-15T09:38:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:38:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on aa2be1855009106bab2804d761e03478 2024-11-15T09:38:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/a05fffb0a8844d969bf620c467912a14 because midkey is the same as first or last row 2024-11-15T09:38:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store aa2be1855009106bab2804d761e03478:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T09:38:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:38:41,996 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T09:38:41,996 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing aa2be1855009106bab2804d761e03478 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-15T09:38:41,997 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 97092 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T09:38:41,997 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1541): aa2be1855009106bab2804d761e03478/info is initiating minor compaction (all files) 2024-11-15T09:38:41,997 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of aa2be1855009106bab2804d761e03478/info in TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. 
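The ExploringCompactionPolicy entry just above reports that 3 store files totalling 97092 bytes were selected after considering 1 permutation "with 1 in ratio". Below is a minimal sketch of that kind of ratio test, not the actual HBase code: a window of candidate files counts as "in ratio" when no single file exceeds the configured ratio times the combined size of the other files in the window. The file sizes and the 1.2 ratio in main are illustrative values, not taken from this run.

public final class RatioWindowCheck {
    // A window is "in ratio" when no file is bigger than ratio * (sum of the other files).
    static boolean inRatio(long[] sizes, double ratio) {
        long total = 0;
        for (long s : sizes) {
            total += s;
        }
        for (long s : sizes) {
            if (s > (total - s) * ratio) {
                return false; // one file dominates the window, so skip this window
            }
        }
        return true;
    }

    public static void main(String[] args) {
        long[] window = { 40_000L, 35_000L, 30_000L }; // hypothetical store-file sizes in bytes
        System.out.println("in ratio: " + inRatio(window, 1.2));
    }
}

Roughly speaking, windows that pass this test are then compared against each other, preferring selections that compact more files for less total I/O, which is why a minor compaction over all three small files is a natural pick here.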
2024-11-15T09:38:41,997 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/a05fffb0a8844d969bf620c467912a14, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/98937cbb82ac4b7084e1596e8b1f7a78, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/de8677c0e4b0461586bd019fafd763c0] into tmpdir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp, totalSize=94.8 K 2024-11-15T09:38:41,998 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting a05fffb0a8844d969bf620c467912a14, keycount=58, bloomtype=ROW, size=66.2 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1731663517794 2024-11-15T09:38:41,998 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting 98937cbb82ac4b7084e1596e8b1f7a78, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1731663519930 2024-11-15T09:38:41,999 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting de8677c0e4b0461586bd019fafd763c0, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1731663521948 2024-11-15T09:38:42,000 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/a8483b36e42c47228d0b852f286f6fb9 is 1080, key is row0077/info:/1731663521974/Put/seqid=0 2024-11-15T09:38:42,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741846_1022 (size=17894) 2024-11-15T09:38:42,012 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/a8483b36e42c47228d0b852f286f6fb9 2024-11-15T09:38:42,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741846_1022 (size=17894) 2024-11-15T09:38:42,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/a8483b36e42c47228d0b852f286f6fb9 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/a8483b36e42c47228d0b852f286f6fb9 2024-11-15T09:38:42,036 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/a8483b36e42c47228d0b852f286f6fb9, entries=12, sequenceid=115, filesize=17.5 K 2024-11-15T09:38:42,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=8.41 KB/8608 for aa2be1855009106bab2804d761e03478 in 41ms, sequenceid=115, compaction requested=false 2024-11-15T09:38:42,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for aa2be1855009106bab2804d761e03478: 2024-11-15T09:38:42,038 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.3 K, sizeToCheck=16.0 K 2024-11-15T09:38:42,038 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:38:42,038 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/a05fffb0a8844d969bf620c467912a14 because midkey is the same as first or last row 2024-11-15T09:38:42,042 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): aa2be1855009106bab2804d761e03478#info#compaction#65 average throughput is 26.00 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T09:38:42,043 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/734ec7c61b46446bb86d3cd0f9e0084b is 1080, key is row0001/info:/1731663517794/Put/seqid=0 2024-11-15T09:38:42,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741847_1023 (size=87327) 2024-11-15T09:38:42,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741847_1023 (size=87327) 2024-11-15T09:38:42,056 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/734ec7c61b46446bb86d3cd0f9e0084b as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/734ec7c61b46446bb86d3cd0f9e0084b 2024-11-15T09:38:42,064 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in aa2be1855009106bab2804d761e03478/info of aa2be1855009106bab2804d761e03478 into 734ec7c61b46446bb86d3cd0f9e0084b(size=85.3 K), total size for store is 102.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
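The split-policy lines above (sumSize vs. sizeToCheck=16.0 K, regionsWithCommonTable=1, and the repeated midkey guard) follow a simple heuristic. The sketch below is an assumed simplification of IncreasingToUpperBoundRegionSplitPolicy, not a copy of it: the size threshold grows with the cube of the number of regions this server hosts for the table, capped at the configured maximum file size, and even an oversized region is not split when the store cannot supply a usable midkey. The initialSize and max-file-size arguments in main are hypothetical.

public final class SplitSizeCheck {
    // Threshold grows with the cube of the region count, capped at the max file size.
    static long sizeToCheck(int regionsWithCommonTable, long initialSize, long desiredMaxFileSize) {
        if (regionsWithCommonTable == 0) {
            return desiredMaxFileSize;
        }
        long cubed = (long) regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
        return Math.min(desiredMaxFileSize, initialSize * cubed);
    }

    // The size check alone is not enough: without a usable midkey the region cannot split.
    static boolean shouldSplit(long sumStoreSize, long sizeToCheck, boolean midkeyUsable) {
        return sumStoreSize > sizeToCheck && midkeyUsable;
    }

    public static void main(String[] args) {
        long threshold = sizeToCheck(1, 16L * 1024, 10L * 1024 * 1024 * 1024); // hypothetical config
        System.out.println("sizeToCheck=" + threshold
            + " shouldSplit=" + shouldSplit(102 * 1024, threshold, false));
    }
}

With regionsWithCommonTable=1 the threshold is just the initial size (16 K above), so the 102.8 K store passes the size check but is still held back until a usable midkey exists, matching the "cannot split ... because midkey is the same as first or last row" entries.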
2024-11-15T09:38:42,065 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for aa2be1855009106bab2804d761e03478: 2024-11-15T09:38:42,065 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478., storeName=aa2be1855009106bab2804d761e03478/info, priority=13, startTime=1731663521996; duration=0sec 2024-11-15T09:38:42,065 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.8 K, sizeToCheck=16.0 K 2024-11-15T09:38:42,065 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:38:42,065 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.8 K, sizeToCheck=16.0 K 2024-11-15T09:38:42,065 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:38:42,065 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.8 K, sizeToCheck=16.0 K 2024-11-15T09:38:42,065 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T09:38:42,066 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:38:42,066 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:38:42,066 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: aa2be1855009106bab2804d761e03478:info 2024-11-15T09:38:42,067 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35857 {}] assignment.AssignmentManager(1355): Split request from 791f12959b23,37741,1731663506439, parent={ENCODED => aa2be1855009106bab2804d761e03478, NAME => 'TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-15T09:38:42,073 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35857 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=791f12959b23,37741,1731663506439 2024-11-15T09:38:42,077 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35857 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aa2be1855009106bab2804d761e03478, daughterA=e786ded921dfe1055e05e19faacf9ea9, daughterB=e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:42,078 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aa2be1855009106bab2804d761e03478, 
daughterA=e786ded921dfe1055e05e19faacf9ea9, daughterB=e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:42,078 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aa2be1855009106bab2804d761e03478, daughterA=e786ded921dfe1055e05e19faacf9ea9, daughterB=e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:42,078 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aa2be1855009106bab2804d761e03478, daughterA=e786ded921dfe1055e05e19faacf9ea9, daughterB=e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:42,087 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aa2be1855009106bab2804d761e03478, UNASSIGN}] 2024-11-15T09:38:42,089 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aa2be1855009106bab2804d761e03478, UNASSIGN 2024-11-15T09:38:42,091 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=aa2be1855009106bab2804d761e03478, regionState=CLOSING, regionLocation=791f12959b23,37741,1731663506439 2024-11-15T09:38:42,094 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aa2be1855009106bab2804d761e03478, UNASSIGN because future has completed 2024-11-15T09:38:42,096 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-15T09:38:42,097 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure aa2be1855009106bab2804d761e03478, server=791f12959b23,37741,1731663506439}] 2024-11-15T09:38:42,255 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close aa2be1855009106bab2804d761e03478 2024-11-15T09:38:42,255 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-15T09:38:42,256 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing aa2be1855009106bab2804d761e03478, disabling compactions & flushes 2024-11-15T09:38:42,256 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. 2024-11-15T09:38:42,256 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. 
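The split request above carries splitKey=row0062 against a parent whose key range is the whole table (STARTKEY='' .. ENDKEY=''). A small sketch, with keys simplified to String instead of HBase's byte[] comparison, of how that key partitions the parent into the two daughters named in the procedure:

public final class SplitKeyRanges {
    // Daughter A keeps [parentStart, splitKey); daughter B gets [splitKey, parentEnd).
    static boolean inDaughterA(String row, String splitKey) {
        return row.compareTo(splitKey) < 0;
    }

    public static void main(String[] args) {
        String splitKey = "row0062"; // taken from the split request in the log
        System.out.println("daughterA=[\"\",\"" + splitKey + "\") daughterB=[\"" + splitKey + "\",\"\")");
        System.out.println("row0001 -> " + (inDaughterA("row0001", splitKey) ? "daughterA" : "daughterB"));
        System.out.println("row0077 -> " + (inDaughterA("row0077", splitKey) ? "daughterA" : "daughterB"));
    }
}

This is consistent with the daughter rows written to hbase:meta later in this log: one daughter keeps the empty start key and the other begins at row0062.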
2024-11-15T09:38:42,256 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. after waiting 0 ms 2024-11-15T09:38:42,256 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. 2024-11-15T09:38:42,256 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing aa2be1855009106bab2804d761e03478 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-15T09:38:42,260 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/f7e585636945453a9851fb96ac2ec7a5 is 1080, key is row0089/info:/1731663521997/Put/seqid=0 2024-11-15T09:38:42,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741848_1024 (size=13586) 2024-11-15T09:38:42,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741848_1024 (size=13586) 2024-11-15T09:38:42,266 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/f7e585636945453a9851fb96ac2ec7a5 2024-11-15T09:38:42,271 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/.tmp/info/f7e585636945453a9851fb96ac2ec7a5 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/f7e585636945453a9851fb96ac2ec7a5 2024-11-15T09:38:42,277 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/f7e585636945453a9851fb96ac2ec7a5, entries=8, sequenceid=127, filesize=13.3 K 2024-11-15T09:38:42,279 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for aa2be1855009106bab2804d761e03478 in 22ms, sequenceid=127, compaction requested=true 2024-11-15T09:38:42,280 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478.-1 {}] regionserver.HStore(2317): Moving the files 
[hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/231a7a4c70454940a851d0adad80941c, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/e3c3025233464cd5ab2dbe7d0fa984a2, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/aa43a162acca48879db6193831475152, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/efad2046c1b14d98a1efc0f307969c71, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/7ad117434d5447919de3805bd028d0f8, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/a05fffb0a8844d969bf620c467912a14, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/000cc5a82a21463ba81d66004fe0e6bf, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/98937cbb82ac4b7084e1596e8b1f7a78, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/de8677c0e4b0461586bd019fafd763c0] to archive 2024-11-15T09:38:42,281 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
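The HFileArchiver entries around this point move each compacted store file from the region's data directory to the mirrored location under archive/. The sketch below shows that path mapping under the assumption that the move is a plain rename of an immutable file; the real archiver also handles name collisions and retries, which are omitted here. ArchiveCompactedFile, toArchivePath, and archive are illustrative names, not HBase classes.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ArchiveCompactedFile {
    // Mirror data/<ns>/<table>/<region>/<family>/<file> under <root>/archive/.
    static Path toArchivePath(Path rootDir, Path storeFile) {
        String relative = storeFile.toUri().getPath()
            .substring(rootDir.toUri().getPath().length() + 1);
        return new Path(new Path(rootDir, "archive"), relative);
    }

    static boolean archive(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
        Path target = toArchivePath(rootDir, storeFile);
        fs.mkdirs(target.getParent()); // make sure the mirrored directory exists
        return fs.rename(storeFile, target); // move, not copy: store files are immutable
    }

    public static void main(String[] args) {
        Path root = new Path("hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657");
        Path file = new Path(root,
            "data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/231a7a4c70454940a851d0adad80941c");
        System.out.println(toArchivePath(root, file));
        // On a live cluster: archive(FileSystem.get(root.toUri(), new Configuration()), root, file);
    }
}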
2024-11-15T09:38:42,283 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/231a7a4c70454940a851d0adad80941c to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/231a7a4c70454940a851d0adad80941c 2024-11-15T09:38:42,284 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/e3c3025233464cd5ab2dbe7d0fa984a2 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/e3c3025233464cd5ab2dbe7d0fa984a2 2024-11-15T09:38:42,285 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/aa43a162acca48879db6193831475152 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/aa43a162acca48879db6193831475152 2024-11-15T09:38:42,287 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/efad2046c1b14d98a1efc0f307969c71 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/efad2046c1b14d98a1efc0f307969c71 2024-11-15T09:38:42,288 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/7ad117434d5447919de3805bd028d0f8 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/7ad117434d5447919de3805bd028d0f8 2024-11-15T09:38:42,289 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/a05fffb0a8844d969bf620c467912a14 to 
hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/a05fffb0a8844d969bf620c467912a14 2024-11-15T09:38:42,290 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/000cc5a82a21463ba81d66004fe0e6bf to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/000cc5a82a21463ba81d66004fe0e6bf 2024-11-15T09:38:42,291 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/98937cbb82ac4b7084e1596e8b1f7a78 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/98937cbb82ac4b7084e1596e8b1f7a78 2024-11-15T09:38:42,293 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/de8677c0e4b0461586bd019fafd763c0 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/de8677c0e4b0461586bd019fafd763c0 2024-11-15T09:38:42,301 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1 2024-11-15T09:38:42,302 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. 2024-11-15T09:38:42,303 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for aa2be1855009106bab2804d761e03478: Waiting for close lock at 1731663522256Running coprocessor pre-close hooks at 1731663522256Disabling compacts and flushes for region at 1731663522256Disabling writes for close at 1731663522256Obtaining lock to block concurrent updates at 1731663522256Preparing flush snapshotting stores in aa2be1855009106bab2804d761e03478 at 1731663522256Finished memstore snapshotting TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478., syncing WAL and waiting on mvcc, flushsize=dataSize=8608, getHeapSize=9456, getOffHeapSize=0, getCellsCount=8 at 1731663522256Flushing stores of TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. 
at 1731663522257 (+1 ms)Flushing aa2be1855009106bab2804d761e03478/info: creating writer at 1731663522257Flushing aa2be1855009106bab2804d761e03478/info: appending metadata at 1731663522260 (+3 ms)Flushing aa2be1855009106bab2804d761e03478/info: closing flushed file at 1731663522260Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35fe1eab: reopening flushed file at 1731663522270 (+10 ms)Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for aa2be1855009106bab2804d761e03478 in 22ms, sequenceid=127, compaction requested=true at 1731663522279 (+9 ms)Writing region close event to WAL at 1731663522298 (+19 ms)Running coprocessor post-close hooks at 1731663522302 (+4 ms)Closed at 1731663522302 2024-11-15T09:38:42,306 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=aa2be1855009106bab2804d761e03478, regionState=CLOSED 2024-11-15T09:38:42,308 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed aa2be1855009106bab2804d761e03478 2024-11-15T09:38:42,308 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure aa2be1855009106bab2804d761e03478, server=791f12959b23,37741,1731663506439 because future has completed 2024-11-15T09:38:42,312 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-15T09:38:42,312 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure aa2be1855009106bab2804d761e03478, server=791f12959b23,37741,1731663506439 in 213 msec 2024-11-15T09:38:42,315 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-15T09:38:42,315 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=aa2be1855009106bab2804d761e03478, UNASSIGN in 225 msec 2024-11-15T09:38:42,322 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:42,327 INFO [PEWorker-3 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=aa2be1855009106bab2804d761e03478, threads=3 2024-11-15T09:38:42,329 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/a8483b36e42c47228d0b852f286f6fb9 for region: aa2be1855009106bab2804d761e03478 2024-11-15T09:38:42,329 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/734ec7c61b46446bb86d3cd0f9e0084b for region: aa2be1855009106bab2804d761e03478 2024-11-15T09:38:42,332 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/f7e585636945453a9851fb96ac2ec7a5 for region: aa2be1855009106bab2804d761e03478 2024-11-15T09:38:42,348 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/a8483b36e42c47228d0b852f286f6fb9, top=true 2024-11-15T09:38:42,349 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/f7e585636945453a9851fb96ac2ec7a5, top=true 2024-11-15T09:38:42,361 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/TestLogRolling-testLogRolling=aa2be1855009106bab2804d761e03478-a8483b36e42c47228d0b852f286f6fb9 for child: e72c9b9e6bc67d6d04ec4895f22c5ab8, parent: aa2be1855009106bab2804d761e03478 2024-11-15T09:38:42,361 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/a8483b36e42c47228d0b852f286f6fb9 for region: aa2be1855009106bab2804d761e03478 2024-11-15T09:38:42,363 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/TestLogRolling-testLogRolling=aa2be1855009106bab2804d761e03478-f7e585636945453a9851fb96ac2ec7a5 for child: e72c9b9e6bc67d6d04ec4895f22c5ab8, parent: aa2be1855009106bab2804d761e03478 2024-11-15T09:38:42,363 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/f7e585636945453a9851fb96ac2ec7a5 for region: aa2be1855009106bab2804d761e03478 2024-11-15T09:38:42,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741849_1025 (size=27) 2024-11-15T09:38:42,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741849_1025 (size=27) 2024-11-15T09:38:42,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741850_1026 (size=27) 2024-11-15T09:38:42,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741850_1026 (size=27) 2024-11-15T09:38:42,376 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/734ec7c61b46446bb86d3cd0f9e0084b for region: aa2be1855009106bab2804d761e03478 2024-11-15T09:38:42,378 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region aa2be1855009106bab2804d761e03478 Daughter A: [hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e786ded921dfe1055e05e19faacf9ea9/info/734ec7c61b46446bb86d3cd0f9e0084b.aa2be1855009106bab2804d761e03478] storefiles, Daughter B: [hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/734ec7c61b46446bb86d3cd0f9e0084b.aa2be1855009106bab2804d761e03478, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/TestLogRolling-testLogRolling=aa2be1855009106bab2804d761e03478-a8483b36e42c47228d0b852f286f6fb9, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/TestLogRolling-testLogRolling=aa2be1855009106bab2804d761e03478-f7e585636945453a9851fb96ac2ec7a5] storefiles. 2024-11-15T09:38:42,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741851_1027 (size=71) 2024-11-15T09:38:42,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741851_1027 (size=71) 2024-11-15T09:38:42,392 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:42,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741852_1028 (size=71) 2024-11-15T09:38:42,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741852_1028 (size=71) 2024-11-15T09:38:42,406 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:42,417 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e786ded921dfe1055e05e19faacf9ea9/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-15T09:38:42,420 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-15T09:38:42,422 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731663522422"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731663522422"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731663522422"}]},"ts":"1731663522422"} 2024-11-15T09:38:42,422 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731663522422"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731663522422"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731663522422"}]},"ts":"1731663522422"} 2024-11-15T09:38:42,422 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731663522422"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731663522422"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731663522422"}]},"ts":"1731663522422"} 2024-11-15T09:38:42,440 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e786ded921dfe1055e05e19faacf9ea9, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e72c9b9e6bc67d6d04ec4895f22c5ab8, ASSIGN}] 2024-11-15T09:38:42,441 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e786ded921dfe1055e05e19faacf9ea9, ASSIGN 2024-11-15T09:38:42,442 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e72c9b9e6bc67d6d04ec4895f22c5ab8, ASSIGN 2024-11-15T09:38:42,443 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e72c9b9e6bc67d6d04ec4895f22c5ab8, ASSIGN; state=SPLITTING_NEW, location=791f12959b23,37741,1731663506439; forceNewPlan=false, retain=false 2024-11-15T09:38:42,443 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e786ded921dfe1055e05e19faacf9ea9, ASSIGN; state=SPLITTING_NEW, location=791f12959b23,37741,1731663506439; forceNewPlan=false, retain=false 2024-11-15T09:38:42,593 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=e786ded921dfe1055e05e19faacf9ea9, regionState=OPENING, regionLocation=791f12959b23,37741,1731663506439 2024-11-15T09:38:42,593 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta 
row=e72c9b9e6bc67d6d04ec4895f22c5ab8, regionState=OPENING, regionLocation=791f12959b23,37741,1731663506439 2024-11-15T09:38:42,595 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e72c9b9e6bc67d6d04ec4895f22c5ab8, ASSIGN because future has completed 2024-11-15T09:38:42,596 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure e72c9b9e6bc67d6d04ec4895f22c5ab8, server=791f12959b23,37741,1731663506439}] 2024-11-15T09:38:42,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e786ded921dfe1055e05e19faacf9ea9, ASSIGN because future has completed 2024-11-15T09:38:42,597 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure e786ded921dfe1055e05e19faacf9ea9, server=791f12959b23,37741,1731663506439}] 2024-11-15T09:38:42,752 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9. 2024-11-15T09:38:42,752 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => e786ded921dfe1055e05e19faacf9ea9, NAME => 'TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-15T09:38:42,752 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling e786ded921dfe1055e05e19faacf9ea9 2024-11-15T09:38:42,752 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:38:42,752 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for e786ded921dfe1055e05e19faacf9ea9 2024-11-15T09:38:42,752 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for e786ded921dfe1055e05e19faacf9ea9 2024-11-15T09:38:42,753 INFO [StoreOpener-e786ded921dfe1055e05e19faacf9ea9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e786ded921dfe1055e05e19faacf9ea9 2024-11-15T09:38:42,754 INFO [StoreOpener-e786ded921dfe1055e05e19faacf9ea9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e786ded921dfe1055e05e19faacf9ea9 columnFamilyName info 2024-11-15T09:38:42,754 DEBUG [StoreOpener-e786ded921dfe1055e05e19faacf9ea9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:42,768 DEBUG [StoreOpener-e786ded921dfe1055e05e19faacf9ea9-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e786ded921dfe1055e05e19faacf9ea9/info/734ec7c61b46446bb86d3cd0f9e0084b.aa2be1855009106bab2804d761e03478->hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/734ec7c61b46446bb86d3cd0f9e0084b-bottom 2024-11-15T09:38:42,769 INFO [StoreOpener-e786ded921dfe1055e05e19faacf9ea9-1 {}] regionserver.HStore(327): Store=e786ded921dfe1055e05e19faacf9ea9/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:38:42,769 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for e786ded921dfe1055e05e19faacf9ea9 2024-11-15T09:38:42,770 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e786ded921dfe1055e05e19faacf9ea9 2024-11-15T09:38:42,771 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e786ded921dfe1055e05e19faacf9ea9 2024-11-15T09:38:42,771 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for e786ded921dfe1055e05e19faacf9ea9 2024-11-15T09:38:42,771 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for e786ded921dfe1055e05e19faacf9ea9 2024-11-15T09:38:42,773 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for e786ded921dfe1055e05e19faacf9ea9 2024-11-15T09:38:42,773 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened e786ded921dfe1055e05e19faacf9ea9; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=720903, jitterRate=-0.08332513272762299}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 
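The store opener above loads "734ec7c61b46446bb86d3cd0f9e0084b.aa2be1855009106bab2804d761e03478->...734ec7c61b46446bb86d3cd0f9e0084b-bottom", i.e. the daughter region does not copy the parent's HFile at split time; it reads the bottom half of the parent file through a reference. A minimal standalone sketch of that idea follows; the class and method names are illustrative assumptions, not HBase's real Reference/HFile API.

import java.util.NavigableMap;
import java.util.TreeMap;

/**
 * Standalone model of the "-bottom" reference loaded above: each daughter gets a
 * lightweight reference that exposes only one half of the parent's sorted file.
 * Illustrative only; not the HBase implementation.
 */
public class ReferenceSplitSketch {

    enum Half { BOTTOM, TOP }

    /** A reference to one half of a parent file, resolved lazily at read time. */
    record HalfReference(NavigableMap<String, String> parentFile, String splitRow, Half half) {
        NavigableMap<String, String> view() {
            // BOTTOM covers [start, splitRow), TOP covers [splitRow, end), mirroring
            // the "-bottom" / "-top" suffixes seen in the store file paths above.
            return half == Half.BOTTOM
                    ? parentFile.headMap(splitRow, false)
                    : parentFile.tailMap(splitRow, true);
        }
    }

    public static void main(String[] args) {
        NavigableMap<String, String> parent = new TreeMap<>();
        for (int i = 1; i <= 120; i++) {
            parent.put(String.format("row%04d", i), "value" + i);
        }
        String splitRow = "row0062";   // the daughter boundary reported in this log

        HalfReference daughterA = new HalfReference(parent, splitRow, Half.BOTTOM);
        HalfReference daughterB = new HalfReference(parent, splitRow, Half.TOP);

        System.out.println("daughter A rows: " + daughterA.view().size()); // 61 (row0001..row0061)
        System.out.println("daughter B rows: " + daughterB.view().size()); // 59 (row0062..row0120)
        System.out.println("no data copied; both views share the parent map");
    }
}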
2024-11-15T09:38:42,773 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e786ded921dfe1055e05e19faacf9ea9 2024-11-15T09:38:42,774 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for e786ded921dfe1055e05e19faacf9ea9: Running coprocessor pre-open hook at 1731663522752Writing region info on filesystem at 1731663522752Initializing all the Stores at 1731663522753 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663522753Cleaning up temporary data from old regions at 1731663522771 (+18 ms)Running coprocessor post-open hooks at 1731663522773 (+2 ms)Region opened successfully at 1731663522774 (+1 ms) 2024-11-15T09:38:42,774 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9., pid=13, masterSystemTime=1731663522748 2024-11-15T09:38:42,775 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store e786ded921dfe1055e05e19faacf9ea9:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T09:38:42,775 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:38:42,775 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-15T09:38:42,775 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9. 2024-11-15T09:38:42,775 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1541): e786ded921dfe1055e05e19faacf9ea9/info is initiating minor compaction (all files) 2024-11-15T09:38:42,775 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e786ded921dfe1055e05e19faacf9ea9/info in TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9. 
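The "Region open journal" record above strings together timed steps such as "Initializing all the Stores at 1731663522753 (+1 ms)". A small standalone sketch of that kind of step journal, assuming only that each step records a timestamp and prints the delta to the previous step; this is not the HBase MonitoredTask API.

import java.util.ArrayList;
import java.util.List;

/** Minimal step journal that renders "step at <ts> (+N ms)" lines. */
public class OpenJournalSketch {

    record Step(String what, long ts) {}

    private final List<Step> steps = new ArrayList<>();

    void add(String what) {
        steps.add(new Step(what, System.currentTimeMillis()));
    }

    String render() {
        StringBuilder sb = new StringBuilder();
        long prev = -1;
        for (Step s : steps) {
            sb.append(s.what()).append(" at ").append(s.ts());
            if (prev >= 0 && s.ts() > prev) {
                sb.append(" (+").append(s.ts() - prev).append(" ms)");
            }
            prev = s.ts();
            sb.append("\n");
        }
        return sb.toString();
    }

    public static void main(String[] args) throws InterruptedException {
        OpenJournalSketch journal = new OpenJournalSketch();
        journal.add("Running coprocessor pre-open hook");
        journal.add("Writing region info on filesystem");
        Thread.sleep(2);                       // simulate work between steps
        journal.add("Initializing all the Stores");
        journal.add("Region opened successfully");
        System.out.print(journal.render());
    }
}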
2024-11-15T09:38:42,776 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e786ded921dfe1055e05e19faacf9ea9/info/734ec7c61b46446bb86d3cd0f9e0084b.aa2be1855009106bab2804d761e03478->hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/734ec7c61b46446bb86d3cd0f9e0084b-bottom] into tmpdir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e786ded921dfe1055e05e19faacf9ea9/.tmp, totalSize=85.3 K
2024-11-15T09:38:42,776 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting 734ec7c61b46446bb86d3cd0f9e0084b.aa2be1855009106bab2804d761e03478, keycount=38, bloomtype=ROW, size=85.3 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1731663517794
2024-11-15T09:38:42,777 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9.
2024-11-15T09:38:42,777 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9.
2024-11-15T09:38:42,777 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.
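The records above show two ideas at once: the just-opened daughter is compacted immediately with an overridden priority because its store still holds reference files from the split, and ordinary stores are instead selected by the size ratio named in the configuration ("ratio 1.200000", minFilesToCompact:3). A toy sketch of both rules follows; it is a simplified stand-in, not the real ExploringCompactionPolicy or HStore code, and the abbreviated file names are illustrative.

import java.util.List;

/** Toy compaction selection: references force a rewrite, otherwise a ratio test applies. */
public class CompactionSelectionSketch {

    record CandidateFile(String name, long sizeKb, boolean isReference) {}

    static boolean shouldCompactAll(List<CandidateFile> files, double ratio, int minFiles) {
        // Rule 1: references left over from a split always force a full rewrite.
        if (files.stream().anyMatch(CandidateFile::isReference)) {
            return true;
        }
        if (files.size() < minFiles) {
            return false;
        }
        // Rule 2: a file only qualifies if it is not much larger than the rest combined.
        long total = files.stream().mapToLong(CandidateFile::sizeKb).sum();
        return files.stream().allMatch(f -> f.sizeKb() <= ratio * (total - f.sizeKb()));
    }

    public static void main(String[] args) {
        // Roughly the daughter inputs seen in this log: an 85 K reference to the
        // parent's half plus two small linked flush files (sizes in KB, rounded).
        List<CandidateFile> daughter = List.of(
                new CandidateFile("734ec7c6...-reference", 85, true),
                new CandidateFile("a8483b36...", 18, false),
                new CandidateFile("f7e58563...", 13, false));
        System.out.println("daughter store compacts all files: "
                + shouldCompactAll(daughter, 1.2, 3)); // true, forced by the reference

        List<CandidateFile> skewed = List.of(
                new CandidateFile("big", 900, false),
                new CandidateFile("small1", 18, false),
                new CandidateFile("small2", 13, false));
        System.out.println("skewed ordinary store compacts all files: "
                + shouldCompactAll(skewed, 1.2, 3)); // false, the 900 K file fails the ratio
    }
}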
2024-11-15T09:38:42,777 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => e72c9b9e6bc67d6d04ec4895f22c5ab8, NAME => 'TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-15T09:38:42,777 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:42,777 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:38:42,777 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:42,777 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=e786ded921dfe1055e05e19faacf9ea9, regionState=OPEN, openSeqNum=131, regionLocation=791f12959b23,37741,1731663506439 2024-11-15T09:38:42,777 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:42,778 INFO [StoreOpener-e72c9b9e6bc67d6d04ec4895f22c5ab8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:42,779 INFO [StoreOpener-e72c9b9e6bc67d6d04ec4895f22c5ab8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e72c9b9e6bc67d6d04ec4895f22c5ab8 columnFamilyName info 2024-11-15T09:38:42,779 DEBUG [StoreOpener-e72c9b9e6bc67d6d04ec4895f22c5ab8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:38:42,779 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-15T09:38:42,780 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
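The flush decision logged just above ("Since none of the CFs were above the size, flushing all.") leads directly into the 4/4 column family flush of hbase:meta that follows. A minimal standalone sketch of that rule: flush only the families whose memstore exceeds a lower bound, and fall back to flushing all of them when none does. The 16 KB bound and the per-family byte counts are assumptions for illustration, not values taken from this log.

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

/** Toy flush policy: flush large families only, or everything if none is large. */
public class FlushPolicySketch {

    static List<String> familiesToFlush(Map<String, Long> memstoreBytesByFamily, long lowerBound) {
        List<String> large = memstoreBytesByFamily.entrySet().stream()
                .filter(e -> e.getValue() > lowerBound)
                .map(Map.Entry::getKey)
                .collect(Collectors.toList());
        // No family is individually large enough: flush them all, as the record above shows.
        return large.isEmpty() ? List.copyOf(memstoreBytesByFamily.keySet()) : large;
    }

    public static void main(String[] args) {
        // Hypothetical spread of a ~5 KB meta memstore across four families.
        Map<String, Long> meta = Map.of(
                "info", 5_000L, "ns", 74L, "table", 122L, "rep_barrier", 0L);
        System.out.println(familiesToFlush(meta, 16_384L)); // all four families

        Map<String, Long> mixed = Map.of("info", 40_000L, "ns", 74L);
        System.out.println(familiesToFlush(mixed, 16_384L)); // only "info"
    }
}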
2024-11-15T09:38:42,780 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-15T09:38:42,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure e786ded921dfe1055e05e19faacf9ea9, server=791f12959b23,37741,1731663506439 because future has completed 2024-11-15T09:38:42,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-15T09:38:42,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure e786ded921dfe1055e05e19faacf9ea9, server=791f12959b23,37741,1731663506439 in 184 msec 2024-11-15T09:38:42,786 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e786ded921dfe1055e05e19faacf9ea9, ASSIGN in 343 msec 2024-11-15T09:38:42,788 DEBUG [StoreOpener-e72c9b9e6bc67d6d04ec4895f22c5ab8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/734ec7c61b46446bb86d3cd0f9e0084b.aa2be1855009106bab2804d761e03478->hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/734ec7c61b46446bb86d3cd0f9e0084b-top 2024-11-15T09:38:42,794 DEBUG [StoreOpener-e72c9b9e6bc67d6d04ec4895f22c5ab8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/TestLogRolling-testLogRolling=aa2be1855009106bab2804d761e03478-a8483b36e42c47228d0b852f286f6fb9 2024-11-15T09:38:42,795 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e786ded921dfe1055e05e19faacf9ea9#info#compaction#67 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T09:38:42,795 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e786ded921dfe1055e05e19faacf9ea9/.tmp/info/6a901a59c9ce4a30aa7a13fa15387f56 is 1080, key is row0001/info:/1731663517794/Put/seqid=0 2024-11-15T09:38:42,799 DEBUG [StoreOpener-e72c9b9e6bc67d6d04ec4895f22c5ab8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/TestLogRolling-testLogRolling=aa2be1855009106bab2804d761e03478-f7e585636945453a9851fb96ac2ec7a5 2024-11-15T09:38:42,799 INFO [StoreOpener-e72c9b9e6bc67d6d04ec4895f22c5ab8-1 {}] regionserver.HStore(327): Store=e72c9b9e6bc67d6d04ec4895f22c5ab8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:38:42,799 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:42,800 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:42,800 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/.tmp/info/14756f07383f49c6a7a8f3ae92859a93 is 193, key is TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8./info:regioninfo/1731663522593/Put/seqid=0 2024-11-15T09:38:42,801 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:42,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741853_1029 (size=70862) 2024-11-15T09:38:42,802 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:42,802 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:42,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741853_1029 (size=70862) 2024-11-15T09:38:42,804 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:42,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741854_1030 (size=9847) 
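The daughter's store files loaded above are named in the form "TestLogRolling-testLogRolling=aa2be1855009106bab2804d761e03478-a8483b36e42c47228d0b852f286f6fb9", i.e. link files that point back at an HFile in the parent region rather than holding data themselves. A small sketch of composing and decomposing that table=parentRegion-hfile pattern follows; the parsing rules are an assumption for illustration, not the canonical HFileLink implementation.

/** Compose/parse a link-style file name of the form table=parentRegion-hfile. */
public class LinkNameSketch {

    record LinkName(String table, String parentRegion, String hfile) {
        String encode() {
            return table + "=" + parentRegion + "-" + hfile;
        }

        static LinkName decode(String name) {
            int eq = name.indexOf('=');
            int dash = name.lastIndexOf('-');
            if (eq < 0 || dash < eq) {
                throw new IllegalArgumentException("not a link name: " + name);
            }
            return new LinkName(name.substring(0, eq),
                    name.substring(eq + 1, dash),
                    name.substring(dash + 1));
        }
    }

    public static void main(String[] args) {
        String fromLog = "TestLogRolling-testLogRolling=aa2be1855009106bab2804d761e03478"
                + "-a8483b36e42c47228d0b852f286f6fb9";
        LinkName link = LinkName.decode(fromLog);
        System.out.println("table:  " + link.table());
        System.out.println("parent: " + link.parentRegion());
        System.out.println("hfile:  " + link.hfile());
        System.out.println("round-trips: " + link.encode().equals(fromLog));
    }
}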
2024-11-15T09:38:42,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741854_1030 (size=9847) 2024-11-15T09:38:42,805 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened e72c9b9e6bc67d6d04ec4895f22c5ab8; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=720491, jitterRate=-0.08384934067726135}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T09:38:42,805 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:42,805 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: Running coprocessor pre-open hook at 1731663522777Writing region info on filesystem at 1731663522777Initializing all the Stores at 1731663522778 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663522778Cleaning up temporary data from old regions at 1731663522802 (+24 ms)Running coprocessor post-open hooks at 1731663522805 (+3 ms)Region opened successfully at 1731663522805 2024-11-15T09:38:42,806 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8., pid=12, masterSystemTime=1731663522748 2024-11-15T09:38:42,806 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store e72c9b9e6bc67d6d04ec4895f22c5ab8:info, priority=-2147483648, current under compaction store size is 2 2024-11-15T09:38:42,806 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:38:42,806 DEBUG [RS:0;791f12959b23:37741-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T09:38:42,806 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/.tmp/info/14756f07383f49c6a7a8f3ae92859a93 2024-11-15T09:38:42,807 INFO [RS:0;791f12959b23:37741-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8. 
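The two "Opened ..." records in this section report desiredMaxFileSize=720903 and 720491 together with small negative jitterRate values. Both figures are consistent with a common base threshold of roughly 786432 bytes (768 KB) scaled by (1 + jitterRate); the base is inferred here, not stated in the log. The sketch below applies a random jitter to a split threshold so that many regions do not all decide to split at exactly the same size; it is a standalone illustration, not the SteppingSplitPolicy source.

import java.util.concurrent.ThreadLocalRandom;

/** Apply a bounded random jitter to a split-size threshold. */
public class JitteredThresholdSketch {

    /** Scale the base threshold by (1 + jitter), jitter drawn from [-maxJitter, +maxJitter]. */
    static long jitteredThreshold(long baseBytes, double maxJitter) {
        double jitterRate = (ThreadLocalRandom.current().nextDouble() - 0.5) * 2 * maxJitter;
        return Math.round(baseBytes * (1.0 + jitterRate));
    }

    public static void main(String[] args) {
        long base = 786_432L;                       // inferred ~768 KB base, an assumption
        double observed = -0.08384934067726135;     // jitterRate printed for e72c9b9e... above

        // Reproduce the logged figure with the observed jitter:
        System.out.println("base * (1 + observedJitter) = " + Math.round(base * (1.0 + observed)));
        // about 720490, i.e. the 720491 in the log up to rounding of the printed jitter

        // And a few freshly drawn thresholds:
        for (int i = 0; i < 3; i++) {
            System.out.println("randomized threshold: " + jitteredThreshold(base, 0.25));
        }
    }
}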
2024-11-15T09:38:42,807 DEBUG [RS:0;791f12959b23:37741-longCompactions-0 {}] regionserver.HStore(1541): e72c9b9e6bc67d6d04ec4895f22c5ab8/info is initiating minor compaction (all files) 2024-11-15T09:38:42,807 INFO [RS:0;791f12959b23:37741-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e72c9b9e6bc67d6d04ec4895f22c5ab8/info in TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8. 2024-11-15T09:38:42,808 INFO [RS:0;791f12959b23:37741-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/734ec7c61b46446bb86d3cd0f9e0084b.aa2be1855009106bab2804d761e03478->hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/734ec7c61b46446bb86d3cd0f9e0084b-top, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/TestLogRolling-testLogRolling=aa2be1855009106bab2804d761e03478-a8483b36e42c47228d0b852f286f6fb9, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/TestLogRolling-testLogRolling=aa2be1855009106bab2804d761e03478-f7e585636945453a9851fb96ac2ec7a5] into tmpdir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp, totalSize=116.0 K 2024-11-15T09:38:42,808 DEBUG [RS:0;791f12959b23:37741-longCompactions-0 {}] compactions.Compactor(225): Compacting 734ec7c61b46446bb86d3cd0f9e0084b.aa2be1855009106bab2804d761e03478, keycount=38, bloomtype=ROW, size=85.3 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1731663517794 2024-11-15T09:38:42,809 DEBUG [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8. 2024-11-15T09:38:42,809 INFO [RS_OPEN_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8. 
2024-11-15T09:38:42,809 DEBUG [RS:0;791f12959b23:37741-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=aa2be1855009106bab2804d761e03478-a8483b36e42c47228d0b852f286f6fb9, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731663521974
2024-11-15T09:38:42,810 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=e72c9b9e6bc67d6d04ec4895f22c5ab8, regionState=OPEN, openSeqNum=131, regionLocation=791f12959b23,37741,1731663506439
2024-11-15T09:38:42,810 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e786ded921dfe1055e05e19faacf9ea9/.tmp/info/6a901a59c9ce4a30aa7a13fa15387f56 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e786ded921dfe1055e05e19faacf9ea9/info/6a901a59c9ce4a30aa7a13fa15387f56
2024-11-15T09:38:42,810 DEBUG [RS:0;791f12959b23:37741-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=aa2be1855009106bab2804d761e03478-f7e585636945453a9851fb96ac2ec7a5, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731663521997
2024-11-15T09:38:42,813 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure e72c9b9e6bc67d6d04ec4895f22c5ab8, server=791f12959b23,37741,1731663506439 because future has completed
2024-11-15T09:38:42,816 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in e786ded921dfe1055e05e19faacf9ea9/info of e786ded921dfe1055e05e19faacf9ea9 into 6a901a59c9ce4a30aa7a13fa15387f56(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute.
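A quick arithmetic check of the e72c9b9e daughter compaction: its three Compacting entries are reported as 85.3 K, 17.5 K and 13.3 K, and the selection as totalSize=116.0 K. Summing the rounded per-file figures gives 116.1 K, so the totals line up once the one-decimal rounding of each file size is accounted for.

/** Sum the three reported input sizes and compare against the logged totalSize. */
public class CompactionTotalCheck {
    public static void main(String[] args) {
        double[] inputsKb = {85.3, 17.5, 13.3};   // the three Compacting entries above
        double sum = 0;
        for (double kb : inputsKb) {
            sum += kb;
        }
        System.out.printf("sum of inputs: %.1f K (log reports totalSize=116.0 K)%n", sum);
    }
}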
2024-11-15T09:38:42,817 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e786ded921dfe1055e05e19faacf9ea9: 2024-11-15T09:38:42,817 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9., storeName=e786ded921dfe1055e05e19faacf9ea9/info, priority=15, startTime=1731663522775; duration=0sec 2024-11-15T09:38:42,817 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:38:42,817 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e786ded921dfe1055e05e19faacf9ea9:info 2024-11-15T09:38:42,818 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-15T09:38:42,818 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure e72c9b9e6bc67d6d04ec4895f22c5ab8, server=791f12959b23,37741,1731663506439 in 218 msec 2024-11-15T09:38:42,820 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-15T09:38:42,820 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e72c9b9e6bc67d6d04ec4895f22c5ab8, ASSIGN in 378 msec 2024-11-15T09:38:42,822 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=aa2be1855009106bab2804d761e03478, daughterA=e786ded921dfe1055e05e19faacf9ea9, daughterB=e72c9b9e6bc67d6d04ec4895f22c5ab8 in 747 msec 2024-11-15T09:38:42,832 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/.tmp/ns/c4677dd7a62d4b8585f979334b5903f8 is 43, key is default/ns:d/1731663507567/Put/seqid=0 2024-11-15T09:38:42,833 INFO [RS:0;791f12959b23:37741-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e72c9b9e6bc67d6d04ec4895f22c5ab8#info#compaction#70 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T09:38:42,833 DEBUG [RS:0;791f12959b23:37741-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/42b2556167ed4e42b9e8eb6ba81d00ec is 1080, key is row0062/info:/1731663519938/Put/seqid=0 2024-11-15T09:38:42,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741855_1031 (size=5153) 2024-11-15T09:38:42,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741855_1031 (size=5153) 2024-11-15T09:38:42,837 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/.tmp/ns/c4677dd7a62d4b8585f979334b5903f8 2024-11-15T09:38:42,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741856_1032 (size=42984) 2024-11-15T09:38:42,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741856_1032 (size=42984) 2024-11-15T09:38:42,849 DEBUG [RS:0;791f12959b23:37741-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/42b2556167ed4e42b9e8eb6ba81d00ec as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/42b2556167ed4e42b9e8eb6ba81d00ec 2024-11-15T09:38:42,855 INFO [RS:0;791f12959b23:37741-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e72c9b9e6bc67d6d04ec4895f22c5ab8/info of e72c9b9e6bc67d6d04ec4895f22c5ab8 into 42b2556167ed4e42b9e8eb6ba81d00ec(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
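The throughput controller mentioned in the compaction records above ("average throughput is 31.30 MB/second ... total limit is 50.00 MB/second", "slept 0 time(s)") paces compaction writes against a byte-per-second budget. A minimal standalone sketch of that shape follows: track how many bytes have been written and sleep whenever the observed rate gets ahead of the limit. It is an illustration of the idea, not the PressureAwareThroughputController code.

/** Pace writes against a bytes-per-second limit by sleeping when ahead of budget. */
public class ThroughputLimiterSketch {

    private final double limitBytesPerSec;
    private final long start = System.nanoTime();
    private long written;
    private int sleeps;

    ThroughputLimiterSketch(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
    }

    void control(long bytesJustWritten) throws InterruptedException {
        written += bytesJustWritten;
        double elapsedSec = (System.nanoTime() - start) / 1e9;
        double minSecondsForBytes = written / limitBytesPerSec;
        long aheadMillis = (long) ((minSecondsForBytes - elapsedSec) * 1000);
        if (aheadMillis > 0) {          // writing faster than the limit allows
            sleeps++;
            Thread.sleep(aheadMillis);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // 50 MB/s limit as in the log; write 5 MB in 1 MB chunks as fast as possible.
        ThroughputLimiterSketch limiter = new ThroughputLimiterSketch(50.0 * 1024 * 1024);
        for (int i = 0; i < 5; i++) {
            limiter.control(1024 * 1024);
        }
        double elapsedSec = (System.nanoTime() - limiter.start) / 1e9;
        System.out.printf("wrote 5 MB in %.3f s, slept %d time(s)%n", elapsedSec, limiter.sleeps);
    }
}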
2024-11-15T09:38:42,855 DEBUG [RS:0;791f12959b23:37741-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:38:42,855 INFO [RS:0;791f12959b23:37741-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8., storeName=e72c9b9e6bc67d6d04ec4895f22c5ab8/info, priority=13, startTime=1731663522806; duration=0sec 2024-11-15T09:38:42,855 DEBUG [RS:0;791f12959b23:37741-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:38:42,855 DEBUG [RS:0;791f12959b23:37741-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e72c9b9e6bc67d6d04ec4895f22c5ab8:info 2024-11-15T09:38:42,856 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/.tmp/table/20314e12c4564fa38f45e36cecdd41aa is 65, key is TestLogRolling-testLogRolling/table:state/1731663508045/Put/seqid=0 2024-11-15T09:38:42,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741857_1033 (size=5340) 2024-11-15T09:38:42,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741857_1033 (size=5340) 2024-11-15T09:38:42,861 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/.tmp/table/20314e12c4564fa38f45e36cecdd41aa 2024-11-15T09:38:42,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/.tmp/info/14756f07383f49c6a7a8f3ae92859a93 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/info/14756f07383f49c6a7a8f3ae92859a93 2024-11-15T09:38:42,872 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/info/14756f07383f49c6a7a8f3ae92859a93, entries=30, sequenceid=17, filesize=9.6 K 2024-11-15T09:38:42,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/.tmp/ns/c4677dd7a62d4b8585f979334b5903f8 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/ns/c4677dd7a62d4b8585f979334b5903f8 2024-11-15T09:38:42,878 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/ns/c4677dd7a62d4b8585f979334b5903f8, entries=2, sequenceid=17, filesize=5.0 K 2024-11-15T09:38:42,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/.tmp/table/20314e12c4564fa38f45e36cecdd41aa as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/table/20314e12c4564fa38f45e36cecdd41aa 2024-11-15T09:38:42,884 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/table/20314e12c4564fa38f45e36cecdd41aa, entries=2, sequenceid=17, filesize=5.2 K 2024-11-15T09:38:42,885 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 105ms, sequenceid=17, compaction requested=false 2024-11-15T09:38:42,885 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-15T09:38:42,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:38:42,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:43,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:43,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:38:44,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:52516 deadline: 1731663534022, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. is not online on 791f12959b23,37741,1731663506439 2024-11-15T09:38:44,049 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478., hostname=791f12959b23,37741,1731663506439, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478., hostname=791f12959b23,37741,1731663506439, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. is not online on 791f12959b23,37741,1731663506439 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T09:38:44,050 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478., hostname=791f12959b23,37741,1731663506439, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478. 
is not online on 791f12959b23,37741,1731663506439 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T09:38:44,050 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731663507678.aa2be1855009106bab2804d761e03478., hostname=791f12959b23,37741,1731663506439, seqNum=2 from cache 2024-11-15T09:38:44,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:38:44,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:45,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:45,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:38:46,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:46,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:47,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,304 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,304 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,304 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,305 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,341 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,341 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,341 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,341 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,341 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,342 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,863 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T09:38:47,864 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,865 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,865 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,865 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,865 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,866 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,905 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,905 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,905 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,913 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,913 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:38:47,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:47,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:48,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:48,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:49,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:49,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:50,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:50,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:51,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:51,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:52,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:52,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:54,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:54,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-15T09:38:54,170 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8., hostname=791f12959b23,37741,1731663506439, seqNum=131]
2024-11-15T09:38:54,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on e72c9b9e6bc67d6d04ec4895f22c5ab8
2024-11-15T09:38:54,183 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e72c9b9e6bc67d6d04ec4895f22c5ab8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-15T09:38:54,187 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/f6e8493d441b4182a8ab0435bf5b3a6d is 1080, key is row0097/info:/1731663534172/Put/seqid=0
2024-11-15T09:38:54,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741858_1034 (size=12516)
2024-11-15T09:38:54,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741858_1034 (size=12516)
2024-11-15T09:38:54,195 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/f6e8493d441b4182a8ab0435bf5b3a6d
2024-11-15T09:38:54,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/f6e8493d441b4182a8ab0435bf5b3a6d as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/f6e8493d441b4182a8ab0435bf5b3a6d
2024-11-15T09:38:54,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/f6e8493d441b4182a8ab0435bf5b3a6d, entries=7, sequenceid=141, filesize=12.2 K
2024-11-15T09:38:54,208 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for e72c9b9e6bc67d6d04ec4895f22c5ab8 in 25ms, sequenceid=141, compaction requested=false
2024-11-15T09:38:54,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8:
2024-11-15T09:38:54,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on e72c9b9e6bc67d6d04ec4895f22c5ab8
2024-11-15T09:38:54,209 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e72c9b9e6bc67d6d04ec4895f22c5ab8 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB
2024-11-15T09:38:54,212 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/637b0657e2724f939db581052f15cd55 is 1080, key is row0104/info:/1731663534184/Put/seqid=0
2024-11-15T09:38:54,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741859_1035 (size=19000)
2024-11-15T09:38:54,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741859_1035 (size=19000)
2024-11-15T09:38:54,218 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/637b0657e2724f939db581052f15cd55
2024-11-15T09:38:54,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/637b0657e2724f939db581052f15cd55 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/637b0657e2724f939db581052f15cd55
2024-11-15T09:38:54,229 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/637b0657e2724f939db581052f15cd55, entries=13, sequenceid=157, filesize=18.6 K
2024-11-15T09:38:54,230 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=11.56 KB/11836 for e72c9b9e6bc67d6d04ec4895f22c5ab8 in 21ms, sequenceid=157, compaction requested=true
2024-11-15T09:38:54,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8:
2024-11-15T09:38:54,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e72c9b9e6bc67d6d04ec4895f22c5ab8:info, priority=-2147483648, current under compaction store size is 1
2024-11-15T09:38:54,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-15T09:38:54,230 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-15T09:38:54,231 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 74500 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-15T09:38:54,231 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1541): e72c9b9e6bc67d6d04ec4895f22c5ab8/info is initiating minor compaction (all files)
2024-11-15T09:38:54,231 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e72c9b9e6bc67d6d04ec4895f22c5ab8/info in TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.
2024-11-15T09:38:54,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on e72c9b9e6bc67d6d04ec4895f22c5ab8
2024-11-15T09:38:54,231 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/42b2556167ed4e42b9e8eb6ba81d00ec, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/f6e8493d441b4182a8ab0435bf5b3a6d, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/637b0657e2724f939db581052f15cd55] into tmpdir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp, totalSize=72.8 K
2024-11-15T09:38:54,231 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e72c9b9e6bc67d6d04ec4895f22c5ab8 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-11-15T09:38:54,232 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting 42b2556167ed4e42b9e8eb6ba81d00ec, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731663519938
2024-11-15T09:38:54,232 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting f6e8493d441b4182a8ab0435bf5b3a6d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1731663534172
2024-11-15T09:38:54,232 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting 637b0657e2724f939db581052f15cd55, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1731663534184
2024-11-15T09:38:54,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/d21a6ea1a6b04de4a6a736ff864336b0 is 1080, key is row0117/info:/1731663534210/Put/seqid=0
2024-11-15T09:38:54,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741860_1036 (size=17906)
2024-11-15T09:38:54,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741860_1036 (size=17906)
2024-11-15T09:38:54,239 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/d21a6ea1a6b04de4a6a736ff864336b0
2024-11-15T09:38:54,241 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e72c9b9e6bc67d6d04ec4895f22c5ab8#info#compaction#75 average throughput is 56.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-15T09:38:54,242 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/c4df9acb8ecc445db0397d55c2ced781 is 1080, key is row0062/info:/1731663519938/Put/seqid=0
2024-11-15T09:38:54,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/d21a6ea1a6b04de4a6a736ff864336b0 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/d21a6ea1a6b04de4a6a736ff864336b0
2024-11-15T09:38:54,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741861_1037 (size=64714)
2024-11-15T09:38:54,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741861_1037 (size=64714)
2024-11-15T09:38:54,249 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/d21a6ea1a6b04de4a6a736ff864336b0, entries=12, sequenceid=172, filesize=17.5 K
2024-11-15T09:38:54,250 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=0 B/0 for e72c9b9e6bc67d6d04ec4895f22c5ab8 in 19ms, sequenceid=172, compaction requested=false
2024-11-15T09:38:54,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8:
2024-11-15T09:38:54,659 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/c4df9acb8ecc445db0397d55c2ced781 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/c4df9acb8ecc445db0397d55c2ced781
2024-11-15T09:38:54,668 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e72c9b9e6bc67d6d04ec4895f22c5ab8/info of e72c9b9e6bc67d6d04ec4895f22c5ab8 into c4df9acb8ecc445db0397d55c2ced781(size=63.2 K), total size for store is 80.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-15T09:38:54,668 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:38:54,668 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8., storeName=e72c9b9e6bc67d6d04ec4895f22c5ab8/info, priority=13, startTime=1731663534230; duration=0sec 2024-11-15T09:38:54,668 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:38:54,668 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e72c9b9e6bc67d6d04ec4895f22c5ab8:info 2024-11-15T09:38:55,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:38:55,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:56,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:56,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:38:56,233 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T09:38:56,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:56,251 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e72c9b9e6bc67d6d04ec4895f22c5ab8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T09:38:56,255 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/09bf034ca5b440b3821af696124f6978 is 1080, key is row0129/info:/1731663536234/Put/seqid=0 2024-11-15T09:38:56,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741862_1038 (size=12516) 2024-11-15T09:38:56,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741862_1038 (size=12516) 2024-11-15T09:38:56,261 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/09bf034ca5b440b3821af696124f6978 2024-11-15T09:38:56,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/09bf034ca5b440b3821af696124f6978 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/09bf034ca5b440b3821af696124f6978 2024-11-15T09:38:56,273 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/09bf034ca5b440b3821af696124f6978, entries=7, sequenceid=183, filesize=12.2 K 2024-11-15T09:38:56,273 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for e72c9b9e6bc67d6d04ec4895f22c5ab8 in 22ms, sequenceid=183, compaction requested=true 2024-11-15T09:38:56,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:38:56,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e72c9b9e6bc67d6d04ec4895f22c5ab8:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T09:38:56,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:38:56,274 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 
16 blocking 2024-11-15T09:38:56,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:56,274 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e72c9b9e6bc67d6d04ec4895f22c5ab8 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-15T09:38:56,275 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 95136 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T09:38:56,275 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1541): e72c9b9e6bc67d6d04ec4895f22c5ab8/info is initiating minor compaction (all files) 2024-11-15T09:38:56,275 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e72c9b9e6bc67d6d04ec4895f22c5ab8/info in TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8. 2024-11-15T09:38:56,275 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/c4df9acb8ecc445db0397d55c2ced781, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/d21a6ea1a6b04de4a6a736ff864336b0, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/09bf034ca5b440b3821af696124f6978] into tmpdir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp, totalSize=92.9 K 2024-11-15T09:38:56,275 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting c4df9acb8ecc445db0397d55c2ced781, keycount=55, bloomtype=ROW, size=63.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1731663519938 2024-11-15T09:38:56,276 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting d21a6ea1a6b04de4a6a736ff864336b0, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1731663534210 2024-11-15T09:38:56,276 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting 09bf034ca5b440b3821af696124f6978, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1731663536234 2024-11-15T09:38:56,278 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/3af24c4cf3e74b81afdb6f3f34afcd7e is 1080, key is row0136/info:/1731663536252/Put/seqid=0 2024-11-15T09:38:56,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741863_1039 (size=16828) 2024-11-15T09:38:56,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741863_1039 (size=16828) 2024-11-15T09:38:56,283 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/3af24c4cf3e74b81afdb6f3f34afcd7e 2024-11-15T09:38:56,288 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e72c9b9e6bc67d6d04ec4895f22c5ab8#info#compaction#78 average throughput is 25.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T09:38:56,288 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/cad0f07ce9a04d0dacb2a2a706322d97 is 1080, key is row0062/info:/1731663519938/Put/seqid=0 2024-11-15T09:38:56,290 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/3af24c4cf3e74b81afdb6f3f34afcd7e as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/3af24c4cf3e74b81afdb6f3f34afcd7e 2024-11-15T09:38:56,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741864_1040 (size=85371) 2024-11-15T09:38:56,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741864_1040 (size=85371) 2024-11-15T09:38:56,296 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/3af24c4cf3e74b81afdb6f3f34afcd7e, entries=11, sequenceid=197, filesize=16.4 K 2024-11-15T09:38:56,297 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for e72c9b9e6bc67d6d04ec4895f22c5ab8 in 22ms, sequenceid=197, compaction requested=false 2024-11-15T09:38:56,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:38:56,298 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/cad0f07ce9a04d0dacb2a2a706322d97 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/cad0f07ce9a04d0dacb2a2a706322d97 2024-11-15T09:38:56,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:56,298 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e72c9b9e6bc67d6d04ec4895f22c5ab8 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-15T09:38:56,303 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/d38ed6bf6abe49b6b1d8b4767b0c460c is 1080, key is row0147/info:/1731663536275/Put/seqid=0 2024-11-15T09:38:56,304 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e72c9b9e6bc67d6d04ec4895f22c5ab8/info of e72c9b9e6bc67d6d04ec4895f22c5ab8 into cad0f07ce9a04d0dacb2a2a706322d97(size=83.4 K), total size for store is 99.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T09:38:56,304 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:38:56,304 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8., storeName=e72c9b9e6bc67d6d04ec4895f22c5ab8/info, priority=13, startTime=1731663536274; duration=0sec 2024-11-15T09:38:56,304 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:38:56,304 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e72c9b9e6bc67d6d04ec4895f22c5ab8:info 2024-11-15T09:38:56,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741865_1041 (size=17906) 2024-11-15T09:38:56,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741865_1041 (size=17906) 2024-11-15T09:38:56,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/d38ed6bf6abe49b6b1d8b4767b0c460c 2024-11-15T09:38:56,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/d38ed6bf6abe49b6b1d8b4767b0c460c as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/d38ed6bf6abe49b6b1d8b4767b0c460c 2024-11-15T09:38:56,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/d38ed6bf6abe49b6b1d8b4767b0c460c, entries=12, sequenceid=212, filesize=17.5 K 2024-11-15T09:38:56,318 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=2.10 KB/2152 for e72c9b9e6bc67d6d04ec4895f22c5ab8 in 20ms, sequenceid=212, compaction requested=true 2024-11-15T09:38:56,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status 
journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:38:56,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e72c9b9e6bc67d6d04ec4895f22c5ab8:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T09:38:56,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:38:56,318 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T09:38:56,319 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 120105 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T09:38:56,319 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1541): e72c9b9e6bc67d6d04ec4895f22c5ab8/info is initiating minor compaction (all files) 2024-11-15T09:38:56,319 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e72c9b9e6bc67d6d04ec4895f22c5ab8/info in TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8. 2024-11-15T09:38:56,319 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/cad0f07ce9a04d0dacb2a2a706322d97, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/3af24c4cf3e74b81afdb6f3f34afcd7e, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/d38ed6bf6abe49b6b1d8b4767b0c460c] into tmpdir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp, totalSize=117.3 K 2024-11-15T09:38:56,319 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting cad0f07ce9a04d0dacb2a2a706322d97, keycount=74, bloomtype=ROW, size=83.4 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1731663519938 2024-11-15T09:38:56,320 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3af24c4cf3e74b81afdb6f3f34afcd7e, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731663536252 2024-11-15T09:38:56,320 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting d38ed6bf6abe49b6b1d8b4767b0c460c, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1731663536275 2024-11-15T09:38:56,330 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e72c9b9e6bc67d6d04ec4895f22c5ab8#info#compaction#80 average throughput is 49.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T09:38:56,331 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/f43716a8f3254553bea3cb9d0b1ee384 is 1080, key is row0062/info:/1731663519938/Put/seqid=0 2024-11-15T09:38:56,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741866_1042 (size=110275) 2024-11-15T09:38:56,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741866_1042 (size=110275) 2024-11-15T09:38:56,750 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/f43716a8f3254553bea3cb9d0b1ee384 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/f43716a8f3254553bea3cb9d0b1ee384 2024-11-15T09:38:56,757 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e72c9b9e6bc67d6d04ec4895f22c5ab8/info of e72c9b9e6bc67d6d04ec4895f22c5ab8 into f43716a8f3254553bea3cb9d0b1ee384(size=107.7 K), total size for store is 107.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T09:38:56,757 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:38:56,757 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8., storeName=e72c9b9e6bc67d6d04ec4895f22c5ab8/info, priority=13, startTime=1731663536318; duration=0sec 2024-11-15T09:38:56,757 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:38:56,757 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e72c9b9e6bc67d6d04ec4895f22c5ab8:info 2024-11-15T09:38:57,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:57,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:58,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:58,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:58,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:58,320 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e72c9b9e6bc67d6d04ec4895f22c5ab8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T09:38:58,337 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/b59c47997a664197b47f19cac1b0ca80 is 1080, key is row0159/info:/1731663536300/Put/seqid=0 2024-11-15T09:38:58,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741867_1043 (size=12516) 2024-11-15T09:38:58,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741867_1043 (size=12516) 2024-11-15T09:38:58,345 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=224 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/b59c47997a664197b47f19cac1b0ca80 2024-11-15T09:38:58,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/b59c47997a664197b47f19cac1b0ca80 as 
hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/b59c47997a664197b47f19cac1b0ca80 2024-11-15T09:38:58,356 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/b59c47997a664197b47f19cac1b0ca80, entries=7, sequenceid=224, filesize=12.2 K 2024-11-15T09:38:58,357 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for e72c9b9e6bc67d6d04ec4895f22c5ab8 in 38ms, sequenceid=224, compaction requested=false 2024-11-15T09:38:58,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:38:58,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:38:58,358 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e72c9b9e6bc67d6d04ec4895f22c5ab8 1/1 column families, dataSize=23.12 KB heapSize=25 KB 2024-11-15T09:38:58,362 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/fc0d257763a14feaa585c5f70b32c298 is 1080, key is row0166/info:/1731663538320/Put/seqid=0 2024-11-15T09:38:58,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741868_1044 (size=28706) 2024-11-15T09:38:58,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741868_1044 (size=28706) 2024-11-15T09:38:58,367 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/fc0d257763a14feaa585c5f70b32c298 2024-11-15T09:38:58,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/fc0d257763a14feaa585c5f70b32c298 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/fc0d257763a14feaa585c5f70b32c298 2024-11-15T09:38:58,376 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/fc0d257763a14feaa585c5f70b32c298, entries=22, sequenceid=249, filesize=28.0 K 2024-11-15T09:38:58,377 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=5.25 KB/5380 for e72c9b9e6bc67d6d04ec4895f22c5ab8 in 19ms, sequenceid=249, compaction requested=true 2024-11-15T09:38:58,377 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:38:58,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e72c9b9e6bc67d6d04ec4895f22c5ab8:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T09:38:58,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:38:58,377 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T09:38:58,378 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 151497 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T09:38:58,379 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1541): e72c9b9e6bc67d6d04ec4895f22c5ab8/info is initiating minor compaction (all files) 2024-11-15T09:38:58,379 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e72c9b9e6bc67d6d04ec4895f22c5ab8/info in TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8. 2024-11-15T09:38:58,379 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/f43716a8f3254553bea3cb9d0b1ee384, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/b59c47997a664197b47f19cac1b0ca80, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/fc0d257763a14feaa585c5f70b32c298] into tmpdir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp, totalSize=147.9 K 2024-11-15T09:38:58,379 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting f43716a8f3254553bea3cb9d0b1ee384, keycount=97, bloomtype=ROW, size=107.7 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1731663519938 2024-11-15T09:38:58,379 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting b59c47997a664197b47f19cac1b0ca80, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=224, earliestPutTs=1731663536300 2024-11-15T09:38:58,380 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting fc0d257763a14feaa585c5f70b32c298, keycount=22, bloomtype=ROW, size=28.0 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1731663538320 2024-11-15T09:38:58,390 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e72c9b9e6bc67d6d04ec4895f22c5ab8#info#compaction#83 average throughput is 64.65 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T09:38:58,390 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/97430fb909d245e18e815c3708f9c970 is 1080, key is row0062/info:/1731663519938/Put/seqid=0 2024-11-15T09:38:58,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741869_1045 (size=141844) 2024-11-15T09:38:58,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741869_1045 (size=141844) 2024-11-15T09:38:58,398 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/97430fb909d245e18e815c3708f9c970 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/97430fb909d245e18e815c3708f9c970 2024-11-15T09:38:58,404 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e72c9b9e6bc67d6d04ec4895f22c5ab8/info of e72c9b9e6bc67d6d04ec4895f22c5ab8 into 97430fb909d245e18e815c3708f9c970(size=138.5 K), total size for store is 138.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T09:38:58,404 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:38:58,404 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8., storeName=e72c9b9e6bc67d6d04ec4895f22c5ab8/info, priority=13, startTime=1731663538377; duration=0sec 2024-11-15T09:38:58,404 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:38:58,404 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e72c9b9e6bc67d6d04ec4895f22c5ab8:info 2024-11-15T09:38:59,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:38:59,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:00,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:00,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:00,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:39:00,377 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e72c9b9e6bc67d6d04ec4895f22c5ab8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T09:39:00,383 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/719852886d8c461eac5b1ddb2bab2b89 is 1080, key is row0188/info:/1731663538359/Put/seqid=0 2024-11-15T09:39:00,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741870_1046 (size=12518) 2024-11-15T09:39:00,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741870_1046 (size=12518) 2024-11-15T09:39:00,389 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/719852886d8c461eac5b1ddb2bab2b89 2024-11-15T09:39:00,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/719852886d8c461eac5b1ddb2bab2b89 as 
hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/719852886d8c461eac5b1ddb2bab2b89 2024-11-15T09:39:00,400 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/719852886d8c461eac5b1ddb2bab2b89, entries=7, sequenceid=260, filesize=12.2 K 2024-11-15T09:39:00,401 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9684 for e72c9b9e6bc67d6d04ec4895f22c5ab8 in 24ms, sequenceid=260, compaction requested=false 2024-11-15T09:39:00,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:39:00,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:39:00,402 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e72c9b9e6bc67d6d04ec4895f22c5ab8 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-15T09:39:00,405 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/7efd210411424f9fb278f3e151afd567 is 1080, key is row0195/info:/1731663540379/Put/seqid=0 2024-11-15T09:39:00,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741871_1047 (size=15760) 2024-11-15T09:39:00,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741871_1047 (size=15760) 2024-11-15T09:39:00,410 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/7efd210411424f9fb278f3e151afd567 2024-11-15T09:39:00,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/7efd210411424f9fb278f3e151afd567 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/7efd210411424f9fb278f3e151afd567 2024-11-15T09:39:00,422 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/7efd210411424f9fb278f3e151afd567, entries=10, sequenceid=273, filesize=15.4 K 2024-11-15T09:39:00,422 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=10.51 KB/10760 for e72c9b9e6bc67d6d04ec4895f22c5ab8 in 21ms, sequenceid=273, compaction requested=true 2024-11-15T09:39:00,423 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:39:00,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e72c9b9e6bc67d6d04ec4895f22c5ab8:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T09:39:00,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:39:00,423 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T09:39:00,424 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 170122 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T09:39:00,424 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1541): e72c9b9e6bc67d6d04ec4895f22c5ab8/info is initiating minor compaction (all files) 2024-11-15T09:39:00,424 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e72c9b9e6bc67d6d04ec4895f22c5ab8/info in TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8. 2024-11-15T09:39:00,424 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/97430fb909d245e18e815c3708f9c970, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/719852886d8c461eac5b1ddb2bab2b89, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/7efd210411424f9fb278f3e151afd567] into tmpdir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp, totalSize=166.1 K 2024-11-15T09:39:00,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:39:00,424 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e72c9b9e6bc67d6d04ec4895f22c5ab8 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-15T09:39:00,425 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting 97430fb909d245e18e815c3708f9c970, keycount=126, bloomtype=ROW, size=138.5 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1731663519938 2024-11-15T09:39:00,425 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting 719852886d8c461eac5b1ddb2bab2b89, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1731663538359 2024-11-15T09:39:00,425 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7efd210411424f9fb278f3e151afd567, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1731663540379 2024-11-15T09:39:00,428 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/fb9b87da55be4dc8b18ae71080210b77 is 1080, key is row0205/info:/1731663540403/Put/seqid=0 2024-11-15T09:39:00,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741872_1048 (size=16839) 2024-11-15T09:39:00,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741872_1048 (size=16839) 2024-11-15T09:39:00,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/fb9b87da55be4dc8b18ae71080210b77 2024-11-15T09:39:00,439 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e72c9b9e6bc67d6d04ec4895f22c5ab8#info#compaction#87 average throughput is 48.91 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T09:39:00,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/fb9b87da55be4dc8b18ae71080210b77 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/fb9b87da55be4dc8b18ae71080210b77 2024-11-15T09:39:00,439 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/6035b981be1541bd92884f37bf2bcda4 is 1080, key is row0062/info:/1731663519938/Put/seqid=0 2024-11-15T09:39:00,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741873_1049 (size=160288) 2024-11-15T09:39:00,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741873_1049 (size=160288) 2024-11-15T09:39:00,445 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/fb9b87da55be4dc8b18ae71080210b77, entries=11, sequenceid=287, filesize=16.4 K 2024-11-15T09:39:00,446 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for e72c9b9e6bc67d6d04ec4895f22c5ab8 in 22ms, sequenceid=287, compaction requested=false 2024-11-15T09:39:00,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:39:00,450 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/6035b981be1541bd92884f37bf2bcda4 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/6035b981be1541bd92884f37bf2bcda4 2024-11-15T09:39:00,455 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e72c9b9e6bc67d6d04ec4895f22c5ab8/info of e72c9b9e6bc67d6d04ec4895f22c5ab8 into 6035b981be1541bd92884f37bf2bcda4(size=156.5 K), total size for store is 173.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T09:39:00,455 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:39:00,455 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8., storeName=e72c9b9e6bc67d6d04ec4895f22c5ab8/info, priority=13, startTime=1731663540423; duration=0sec 2024-11-15T09:39:00,455 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:39:00,455 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e72c9b9e6bc67d6d04ec4895f22c5ab8:info 2024-11-15T09:39:01,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:01,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:02,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:02,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:02,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:39:02,451 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e72c9b9e6bc67d6d04ec4895f22c5ab8 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-15T09:39:02,458 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/77295548520f4d31b7267310feb7a012 is 1080, key is row0216/info:/1731663540425/Put/seqid=0 2024-11-15T09:39:02,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741874_1050 (size=15760) 2024-11-15T09:39:02,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741874_1050 (size=15760) 2024-11-15T09:39:02,463 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/77295548520f4d31b7267310feb7a012 2024-11-15T09:39:02,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/77295548520f4d31b7267310feb7a012 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/77295548520f4d31b7267310feb7a012 2024-11-15T09:39:02,473 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/77295548520f4d31b7267310feb7a012, entries=10, sequenceid=301, filesize=15.4 K 2024-11-15T09:39:02,474 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=11.56 KB/11836 for e72c9b9e6bc67d6d04ec4895f22c5ab8 in 23ms, sequenceid=301, compaction requested=true 2024-11-15T09:39:02,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:39:02,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e72c9b9e6bc67d6d04ec4895f22c5ab8:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T09:39:02,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:39:02,475 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T09:39:02,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:39:02,476 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 192887 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T09:39:02,476 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1541): e72c9b9e6bc67d6d04ec4895f22c5ab8/info is initiating minor compaction (all files) 2024-11-15T09:39:02,476 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e72c9b9e6bc67d6d04ec4895f22c5ab8 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-15T09:39:02,476 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e72c9b9e6bc67d6d04ec4895f22c5ab8/info in TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8. 2024-11-15T09:39:02,476 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/6035b981be1541bd92884f37bf2bcda4, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/fb9b87da55be4dc8b18ae71080210b77, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/77295548520f4d31b7267310feb7a012] into tmpdir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp, totalSize=188.4 K 2024-11-15T09:39:02,476 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6035b981be1541bd92884f37bf2bcda4, keycount=143, bloomtype=ROW, size=156.5 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1731663519938 2024-11-15T09:39:02,477 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting fb9b87da55be4dc8b18ae71080210b77, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1731663540403 2024-11-15T09:39:02,477 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting 77295548520f4d31b7267310feb7a012, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1731663540425 2024-11-15T09:39:02,480 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/4919bed56f9e45c58a1b7bbe0e072a11 is 1080, key is row0226/info:/1731663542455/Put/seqid=0 2024-11-15T09:39:02,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is 
added to blk_1073741875_1051 (size=17918) 2024-11-15T09:39:02,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741875_1051 (size=17918) 2024-11-15T09:39:02,485 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/4919bed56f9e45c58a1b7bbe0e072a11 2024-11-15T09:39:02,490 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e72c9b9e6bc67d6d04ec4895f22c5ab8#info#compaction#90 average throughput is 56.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T09:39:02,491 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/4919bed56f9e45c58a1b7bbe0e072a11 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/4919bed56f9e45c58a1b7bbe0e072a11 2024-11-15T09:39:02,491 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/0f6fa27a18f246388429878ab2e94655 is 1080, key is row0062/info:/1731663519938/Put/seqid=0 2024-11-15T09:39:02,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741876_1052 (size=183053) 2024-11-15T09:39:02,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741876_1052 (size=183053) 2024-11-15T09:39:02,497 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/4919bed56f9e45c58a1b7bbe0e072a11, entries=12, sequenceid=316, filesize=17.5 K 2024-11-15T09:39:02,498 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=9.46 KB/9684 for e72c9b9e6bc67d6d04ec4895f22c5ab8 in 22ms, sequenceid=316, compaction requested=false 2024-11-15T09:39:02,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:39:02,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37741 {}] regionserver.HRegion(8855): Flush requested on e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:39:02,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e72c9b9e6bc67d6d04ec4895f22c5ab8 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-15T09:39:02,502 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/0f6fa27a18f246388429878ab2e94655 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/0f6fa27a18f246388429878ab2e94655 2024-11-15T09:39:02,505 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/5a8aa7aa3fa64050a5b33a29211a697f is 1080, key is row0238/info:/1731663542477/Put/seqid=0 2024-11-15T09:39:02,509 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e72c9b9e6bc67d6d04ec4895f22c5ab8/info of e72c9b9e6bc67d6d04ec4895f22c5ab8 into 0f6fa27a18f246388429878ab2e94655(size=178.8 K), total size for store is 196.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T09:39:02,509 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:39:02,509 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8., storeName=e72c9b9e6bc67d6d04ec4895f22c5ab8/info, priority=13, startTime=1731663542474; duration=0sec 2024-11-15T09:39:02,509 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:39:02,509 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e72c9b9e6bc67d6d04ec4895f22c5ab8:info 2024-11-15T09:39:02,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741877_1053 (size=16839) 2024-11-15T09:39:02,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741877_1053 (size=16839) 2024-11-15T09:39:02,511 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/5a8aa7aa3fa64050a5b33a29211a697f 2024-11-15T09:39:02,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/5a8aa7aa3fa64050a5b33a29211a697f as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/5a8aa7aa3fa64050a5b33a29211a697f 2024-11-15T09:39:02,521 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/5a8aa7aa3fa64050a5b33a29211a697f, entries=11, sequenceid=330, filesize=16.4 K 2024-11-15T09:39:02,522 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=8.41 KB/8608 for e72c9b9e6bc67d6d04ec4895f22c5ab8 in 22ms, sequenceid=330, compaction requested=true 2024-11-15T09:39:02,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:39:02,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e72c9b9e6bc67d6d04ec4895f22c5ab8:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T09:39:02,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:39:02,522 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T09:39:02,523 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 217810 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T09:39:02,523 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1541): e72c9b9e6bc67d6d04ec4895f22c5ab8/info is initiating minor compaction (all files) 2024-11-15T09:39:02,523 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e72c9b9e6bc67d6d04ec4895f22c5ab8/info in TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8. 
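The entries above record the region server's own flush-then-compact cycle on e72c9b9e6bc67d6d04ec4895f22c5ab8/info: each memstore flush adds another HFile, the store is marked for compaction, and ExploringCompactionPolicy selects all three eligible files for a minor compaction. As an illustration only (not part of this test run), the same flush and compaction can be requested from a client through the standard HBase Admin API; this is a minimal sketch assuming a reachable test cluster on the classpath configuration, with the table name taken from the log and everything else assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndCompactSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();          // picks up hbase-site.xml from the classpath
        TableName table = TableName.valueOf("TestLogRolling-testLogRolling"); // table name from the log above
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.flush(table);        // asks the region servers to flush memstores, as MemStoreFlusher.0 does above
          admin.majorCompact(table); // queues a compaction request; selection and completion appear in the RS log
        }
      }
    }

Both calls are asynchronous requests: the client returns once the region server has accepted the work, and the actual flush and compaction results are what show up in log entries like the ones above.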
2024-11-15T09:39:02,523 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/0f6fa27a18f246388429878ab2e94655, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/4919bed56f9e45c58a1b7bbe0e072a11, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/5a8aa7aa3fa64050a5b33a29211a697f] into tmpdir=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp, totalSize=212.7 K 2024-11-15T09:39:02,524 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0f6fa27a18f246388429878ab2e94655, keycount=164, bloomtype=ROW, size=178.8 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1731663519938 2024-11-15T09:39:02,524 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4919bed56f9e45c58a1b7bbe0e072a11, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1731663542455 2024-11-15T09:39:02,524 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5a8aa7aa3fa64050a5b33a29211a697f, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1731663542477 2024-11-15T09:39:02,535 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e72c9b9e6bc67d6d04ec4895f22c5ab8#info#compaction#92 average throughput is 63.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T09:39:02,536 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/979cca2f214b4b94bbdad4c1d172c395 is 1080, key is row0062/info:/1731663519938/Put/seqid=0 2024-11-15T09:39:02,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741878_1054 (size=208049) 2024-11-15T09:39:02,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741878_1054 (size=208049) 2024-11-15T09:39:02,543 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/979cca2f214b4b94bbdad4c1d172c395 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/979cca2f214b4b94bbdad4c1d172c395 2024-11-15T09:39:02,549 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e72c9b9e6bc67d6d04ec4895f22c5ab8/info of e72c9b9e6bc67d6d04ec4895f22c5ab8 into 979cca2f214b4b94bbdad4c1d172c395(size=203.2 K), total size for store is 203.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T09:39:02,549 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:39:02,549 INFO [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8., storeName=e72c9b9e6bc67d6d04ec4895f22c5ab8/info, priority=13, startTime=1731663542522; duration=0sec 2024-11-15T09:39:02,549 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T09:39:02,549 DEBUG [RS:0;791f12959b23:37741-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e72c9b9e6bc67d6d04ec4895f22c5ab8:info 2024-11-15T09:39:03,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:03,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:04,014 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:04,014 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:04,517 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-15T09:39:04,518 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C37741%2C1731663506439.1731663544518 2024-11-15T09:39:04,527 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,527 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,527 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,527 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,527 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,528 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/WALs/791f12959b23,37741,1731663506439/791f12959b23%2C37741%2C1731663506439.1731663507084 with entries=318, filesize=310.38 KB; new WAL /user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/WALs/791f12959b23,37741,1731663506439/791f12959b23%2C37741%2C1731663506439.1731663544518 2024-11-15T09:39:04,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741833_1009 (size=317837) 2024-11-15T09:39:04,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741833_1009 (size=317837) 2024-11-15T09:39:04,536 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36185:36185),(127.0.0.1/127.0.0.1:43221:43221)] 2024-11-15T09:39:04,540 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing e72c9b9e6bc67d6d04ec4895f22c5ab8 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 
2024-11-15T09:39:04,543 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/e613586bebc24f3d94d60b3e6eef5b32 is 1080, key is row0249/info:/1731663542502/Put/seqid=0 2024-11-15T09:39:04,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741880_1056 (size=13602) 2024-11-15T09:39:04,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741880_1056 (size=13602) 2024-11-15T09:39:04,548 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/e613586bebc24f3d94d60b3e6eef5b32 2024-11-15T09:39:04,553 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/.tmp/info/e613586bebc24f3d94d60b3e6eef5b32 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/e613586bebc24f3d94d60b3e6eef5b32 2024-11-15T09:39:04,558 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/e613586bebc24f3d94d60b3e6eef5b32, entries=8, sequenceid=343, filesize=13.3 K 2024-11-15T09:39:04,559 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for e72c9b9e6bc67d6d04ec4895f22c5ab8 in 19ms, sequenceid=343, compaction requested=false 2024-11-15T09:39:04,559 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: 2024-11-15T09:39:04,559 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-15T09:39:04,563 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/.tmp/info/22ac8989b0f3483c992865b987191e86 is 193, key is TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8./info:regioninfo/1731663522809/Put/seqid=0 2024-11-15T09:39:04,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741881_1057 (size=6223) 2024-11-15T09:39:04,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741881_1057 (size=6223) 2024-11-15T09:39:04,567 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/.tmp/info/22ac8989b0f3483c992865b987191e86 
2024-11-15T09:39:04,571 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/.tmp/info/22ac8989b0f3483c992865b987191e86 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/info/22ac8989b0f3483c992865b987191e86 2024-11-15T09:39:04,576 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/info/22ac8989b0f3483c992865b987191e86, entries=5, sequenceid=21, filesize=6.1 K 2024-11-15T09:39:04,577 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 18ms, sequenceid=21, compaction requested=false 2024-11-15T09:39:04,577 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-15T09:39:04,577 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for e786ded921dfe1055e05e19faacf9ea9: 2024-11-15T09:39:04,577 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C37741%2C1731663506439.1731663544577 2024-11-15T09:39:04,581 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,581 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,582 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,582 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,582 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,582 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/WALs/791f12959b23,37741,1731663506439/791f12959b23%2C37741%2C1731663506439.1731663544518 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/WALs/791f12959b23,37741,1731663506439/791f12959b23%2C37741%2C1731663506439.1731663544577 2024-11-15T09:39:04,582 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43221:43221),(127.0.0.1/127.0.0.1:36185:36185)] 2024-11-15T09:39:04,583 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/WALs/791f12959b23,37741,1731663506439/791f12959b23%2C37741%2C1731663506439.1731663544518 is not closed yet, will try archiving it next time 2024-11-15T09:39:04,583 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/WALs/791f12959b23,37741,1731663506439/791f12959b23%2C37741%2C1731663506439.1731663507084 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/oldWALs/791f12959b23%2C37741%2C1731663506439.1731663507084 2024-11-15T09:39:04,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741879_1055 (size=731) 2024-11-15T09:39:04,583 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T09:39:04,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741879_1055 (size=731) 
2024-11-15T09:39:04,584 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/WALs/791f12959b23,37741,1731663506439/791f12959b23%2C37741%2C1731663506439.1731663544518 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/oldWALs/791f12959b23%2C37741%2C1731663506439.1731663544518 2024-11-15T09:39:04,683 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T09:39:04,684 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T09:39:04,684 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:39:04,684 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:39:04,684 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:39:04,684 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T09:39:04,684 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T09:39:04,684 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1309291058, stopped=false 2024-11-15T09:39:04,685 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=791f12959b23,35857,1731663506251 2024-11-15T09:39:04,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T09:39:04,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T09:39:04,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:39:04,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:39:04,739 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T09:39:04,740 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T09:39:04,740 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:39:04,741 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:39:04,741 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:39:04,741 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:39:04,741 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '791f12959b23,37741,1731663506439' ***** 2024-11-15T09:39:04,741 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T09:39:04,742 INFO [RS:0;791f12959b23:37741 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T09:39:04,743 INFO [RS:0;791f12959b23:37741 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T09:39:04,743 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T09:39:04,743 INFO [RS:0;791f12959b23:37741 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T09:39:04,743 INFO [RS:0;791f12959b23:37741 {}] regionserver.HRegionServer(3091): Received CLOSE for e72c9b9e6bc67d6d04ec4895f22c5ab8 2024-11-15T09:39:04,743 INFO [RS:0;791f12959b23:37741 {}] regionserver.HRegionServer(3091): Received CLOSE for e786ded921dfe1055e05e19faacf9ea9 2024-11-15T09:39:04,743 INFO [RS:0;791f12959b23:37741 {}] regionserver.HRegionServer(959): stopping server 791f12959b23,37741,1731663506439 2024-11-15T09:39:04,743 INFO [RS:0;791f12959b23:37741 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T09:39:04,743 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e72c9b9e6bc67d6d04ec4895f22c5ab8, disabling compactions & flushes 2024-11-15T09:39:04,744 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8. 2024-11-15T09:39:04,744 INFO [RS:0;791f12959b23:37741 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;791f12959b23:37741. 2024-11-15T09:39:04,744 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8. 2024-11-15T09:39:04,744 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8. 
after waiting 0 ms 2024-11-15T09:39:04,744 DEBUG [RS:0;791f12959b23:37741 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:39:04,744 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8. 2024-11-15T09:39:04,744 DEBUG [RS:0;791f12959b23:37741 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:39:04,744 INFO [RS:0;791f12959b23:37741 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T09:39:04,744 INFO [RS:0;791f12959b23:37741 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T09:39:04,745 INFO [RS:0;791f12959b23:37741 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-15T09:39:04,745 INFO [RS:0;791f12959b23:37741 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T09:39:04,745 INFO [RS:0;791f12959b23:37741 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-15T09:39:04,745 DEBUG [RS:0;791f12959b23:37741 {}] regionserver.HRegionServer(1325): Online Regions={e72c9b9e6bc67d6d04ec4895f22c5ab8=TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8., 1588230740=hbase:meta,,1.1588230740, e786ded921dfe1055e05e19faacf9ea9=TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9.} 2024-11-15T09:39:04,745 DEBUG [RS:0;791f12959b23:37741 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, e72c9b9e6bc67d6d04ec4895f22c5ab8, e786ded921dfe1055e05e19faacf9ea9 2024-11-15T09:39:04,745 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T09:39:04,745 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T09:39:04,746 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T09:39:04,746 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T09:39:04,746 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T09:39:04,745 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/734ec7c61b46446bb86d3cd0f9e0084b.aa2be1855009106bab2804d761e03478->hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/734ec7c61b46446bb86d3cd0f9e0084b-top, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/TestLogRolling-testLogRolling=aa2be1855009106bab2804d761e03478-a8483b36e42c47228d0b852f286f6fb9, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/42b2556167ed4e42b9e8eb6ba81d00ec, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/TestLogRolling-testLogRolling=aa2be1855009106bab2804d761e03478-f7e585636945453a9851fb96ac2ec7a5, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/f6e8493d441b4182a8ab0435bf5b3a6d, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/c4df9acb8ecc445db0397d55c2ced781, 
hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/637b0657e2724f939db581052f15cd55, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/d21a6ea1a6b04de4a6a736ff864336b0, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/cad0f07ce9a04d0dacb2a2a706322d97, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/09bf034ca5b440b3821af696124f6978, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/3af24c4cf3e74b81afdb6f3f34afcd7e, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/f43716a8f3254553bea3cb9d0b1ee384, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/d38ed6bf6abe49b6b1d8b4767b0c460c, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/b59c47997a664197b47f19cac1b0ca80, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/97430fb909d245e18e815c3708f9c970, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/fc0d257763a14feaa585c5f70b32c298, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/719852886d8c461eac5b1ddb2bab2b89, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/6035b981be1541bd92884f37bf2bcda4, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/7efd210411424f9fb278f3e151afd567, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/fb9b87da55be4dc8b18ae71080210b77, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/0f6fa27a18f246388429878ab2e94655, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/77295548520f4d31b7267310feb7a012, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/4919bed56f9e45c58a1b7bbe0e072a11, 
hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/5a8aa7aa3fa64050a5b33a29211a697f] to archive 2024-11-15T09:39:04,747 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-15T09:39:04,748 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/734ec7c61b46446bb86d3cd0f9e0084b.aa2be1855009106bab2804d761e03478 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/734ec7c61b46446bb86d3cd0f9e0084b.aa2be1855009106bab2804d761e03478 2024-11-15T09:39:04,750 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/TestLogRolling-testLogRolling=aa2be1855009106bab2804d761e03478-a8483b36e42c47228d0b852f286f6fb9 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/TestLogRolling-testLogRolling=aa2be1855009106bab2804d761e03478-a8483b36e42c47228d0b852f286f6fb9 2024-11-15T09:39:04,750 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-15T09:39:04,750 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T09:39:04,750 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T09:39:04,751 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731663544745Running coprocessor pre-close hooks at 1731663544745Disabling compacts and flushes for region at 1731663544745Disabling writes for close at 1731663544746 (+1 ms)Writing region close event to WAL at 1731663544747 (+1 ms)Running coprocessor post-close hooks at 1731663544750 (+3 ms)Closed at 1731663544750 2024-11-15T09:39:04,751 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T09:39:04,751 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/42b2556167ed4e42b9e8eb6ba81d00ec to 
hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/42b2556167ed4e42b9e8eb6ba81d00ec 2024-11-15T09:39:04,752 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/TestLogRolling-testLogRolling=aa2be1855009106bab2804d761e03478-f7e585636945453a9851fb96ac2ec7a5 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/TestLogRolling-testLogRolling=aa2be1855009106bab2804d761e03478-f7e585636945453a9851fb96ac2ec7a5 2024-11-15T09:39:04,753 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/f6e8493d441b4182a8ab0435bf5b3a6d to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/f6e8493d441b4182a8ab0435bf5b3a6d 2024-11-15T09:39:04,754 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/c4df9acb8ecc445db0397d55c2ced781 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/c4df9acb8ecc445db0397d55c2ced781 2024-11-15T09:39:04,755 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/637b0657e2724f939db581052f15cd55 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/637b0657e2724f939db581052f15cd55 2024-11-15T09:39:04,756 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/d21a6ea1a6b04de4a6a736ff864336b0 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/d21a6ea1a6b04de4a6a736ff864336b0 2024-11-15T09:39:04,757 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/cad0f07ce9a04d0dacb2a2a706322d97 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/cad0f07ce9a04d0dacb2a2a706322d97 2024-11-15T09:39:04,758 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/09bf034ca5b440b3821af696124f6978 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/09bf034ca5b440b3821af696124f6978 2024-11-15T09:39:04,759 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/3af24c4cf3e74b81afdb6f3f34afcd7e to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/3af24c4cf3e74b81afdb6f3f34afcd7e 2024-11-15T09:39:04,760 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/f43716a8f3254553bea3cb9d0b1ee384 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/f43716a8f3254553bea3cb9d0b1ee384 2024-11-15T09:39:04,761 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/d38ed6bf6abe49b6b1d8b4767b0c460c to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/d38ed6bf6abe49b6b1d8b4767b0c460c 2024-11-15T09:39:04,762 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/b59c47997a664197b47f19cac1b0ca80 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/b59c47997a664197b47f19cac1b0ca80 2024-11-15T09:39:04,763 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/97430fb909d245e18e815c3708f9c970 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/97430fb909d245e18e815c3708f9c970 2024-11-15T09:39:04,764 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/fc0d257763a14feaa585c5f70b32c298 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/fc0d257763a14feaa585c5f70b32c298 2024-11-15T09:39:04,765 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/719852886d8c461eac5b1ddb2bab2b89 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/719852886d8c461eac5b1ddb2bab2b89 2024-11-15T09:39:04,766 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/6035b981be1541bd92884f37bf2bcda4 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/6035b981be1541bd92884f37bf2bcda4 2024-11-15T09:39:04,767 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/7efd210411424f9fb278f3e151afd567 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/7efd210411424f9fb278f3e151afd567 2024-11-15T09:39:04,768 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/fb9b87da55be4dc8b18ae71080210b77 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/fb9b87da55be4dc8b18ae71080210b77 2024-11-15T09:39:04,770 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/0f6fa27a18f246388429878ab2e94655 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/0f6fa27a18f246388429878ab2e94655 2024-11-15T09:39:04,771 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/77295548520f4d31b7267310feb7a012 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/77295548520f4d31b7267310feb7a012 2024-11-15T09:39:04,772 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/4919bed56f9e45c58a1b7bbe0e072a11 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/4919bed56f9e45c58a1b7bbe0e072a11 2024-11-15T09:39:04,773 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/5a8aa7aa3fa64050a5b33a29211a697f to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/info/5a8aa7aa3fa64050a5b33a29211a697f 2024-11-15T09:39:04,773 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=791f12959b23:35857 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-15T09:39:04,773 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [42b2556167ed4e42b9e8eb6ba81d00ec=42984, f6e8493d441b4182a8ab0435bf5b3a6d=12516, c4df9acb8ecc445db0397d55c2ced781=64714, 637b0657e2724f939db581052f15cd55=19000, d21a6ea1a6b04de4a6a736ff864336b0=17906, cad0f07ce9a04d0dacb2a2a706322d97=85371, 09bf034ca5b440b3821af696124f6978=12516, 3af24c4cf3e74b81afdb6f3f34afcd7e=16828, f43716a8f3254553bea3cb9d0b1ee384=110275, d38ed6bf6abe49b6b1d8b4767b0c460c=17906, b59c47997a664197b47f19cac1b0ca80=12516, 97430fb909d245e18e815c3708f9c970=141844, fc0d257763a14feaa585c5f70b32c298=28706, 719852886d8c461eac5b1ddb2bab2b89=12518, 6035b981be1541bd92884f37bf2bcda4=160288, 7efd210411424f9fb278f3e151afd567=15760, fb9b87da55be4dc8b18ae71080210b77=16839, 0f6fa27a18f246388429878ab2e94655=183053, 77295548520f4d31b7267310feb7a012=15760, 4919bed56f9e45c58a1b7bbe0e072a11=17918, 5a8aa7aa3fa64050a5b33a29211a697f=16839] 2024-11-15T09:39:04,776 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e72c9b9e6bc67d6d04ec4895f22c5ab8/recovered.edits/346.seqid, newMaxSeqId=346, maxSeqId=130 2024-11-15T09:39:04,777 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8. 2024-11-15T09:39:04,777 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e72c9b9e6bc67d6d04ec4895f22c5ab8: Waiting for close lock at 1731663544743Running coprocessor pre-close hooks at 1731663544743Disabling compacts and flushes for region at 1731663544743Disabling writes for close at 1731663544744 (+1 ms)Writing region close event to WAL at 1731663544774 (+30 ms)Running coprocessor post-close hooks at 1731663544777 (+3 ms)Closed at 1731663544777 2024-11-15T09:39:04,777 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731663522073.e72c9b9e6bc67d6d04ec4895f22c5ab8. 2024-11-15T09:39:04,777 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e786ded921dfe1055e05e19faacf9ea9, disabling compactions & flushes 2024-11-15T09:39:04,777 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9. 2024-11-15T09:39:04,777 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9. 2024-11-15T09:39:04,777 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9. 
after waiting 0 ms 2024-11-15T09:39:04,777 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9. 2024-11-15T09:39:04,778 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e786ded921dfe1055e05e19faacf9ea9/info/734ec7c61b46446bb86d3cd0f9e0084b.aa2be1855009106bab2804d761e03478->hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/aa2be1855009106bab2804d761e03478/info/734ec7c61b46446bb86d3cd0f9e0084b-bottom] to archive 2024-11-15T09:39:04,778 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-15T09:39:04,780 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e786ded921dfe1055e05e19faacf9ea9/info/734ec7c61b46446bb86d3cd0f9e0084b.aa2be1855009106bab2804d761e03478 to hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/archive/data/default/TestLogRolling-testLogRolling/e786ded921dfe1055e05e19faacf9ea9/info/734ec7c61b46446bb86d3cd0f9e0084b.aa2be1855009106bab2804d761e03478 2024-11-15T09:39:04,780 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-15T09:39:04,785 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/data/default/TestLogRolling-testLogRolling/e786ded921dfe1055e05e19faacf9ea9/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130 2024-11-15T09:39:04,785 INFO [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9. 2024-11-15T09:39:04,786 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e786ded921dfe1055e05e19faacf9ea9: Waiting for close lock at 1731663544777Running coprocessor pre-close hooks at 1731663544777Disabling compacts and flushes for region at 1731663544777Disabling writes for close at 1731663544777Writing region close event to WAL at 1731663544781 (+4 ms)Running coprocessor post-close hooks at 1731663544785 (+4 ms)Closed at 1731663544785 2024-11-15T09:39:04,786 DEBUG [RS_CLOSE_REGION-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731663522073.e786ded921dfe1055e05e19faacf9ea9. 2024-11-15T09:39:04,946 INFO [RS:0;791f12959b23:37741 {}] regionserver.HRegionServer(976): stopping server 791f12959b23,37741,1731663506439; all regions closed. 
2024-11-15T09:39:04,946 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,946 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,946 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,946 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,946 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741834_1010 (size=8107) 2024-11-15T09:39:04,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741834_1010 (size=8107) 2024-11-15T09:39:04,950 DEBUG [RS:0;791f12959b23:37741 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/oldWALs 2024-11-15T09:39:04,950 INFO [RS:0;791f12959b23:37741 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 791f12959b23%2C37741%2C1731663506439.meta:.meta(num 1731663507481) 2024-11-15T09:39:04,951 INFO [regionserver/791f12959b23:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T09:39:04,951 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,951 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,951 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,951 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,951 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:04,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741882_1058 (size=780) 2024-11-15T09:39:04,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741882_1058 (size=780) 2024-11-15T09:39:04,954 DEBUG [RS:0;791f12959b23:37741 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/oldWALs 2024-11-15T09:39:04,954 INFO [RS:0;791f12959b23:37741 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 791f12959b23%2C37741%2C1731663506439:(num 1731663544577) 2024-11-15T09:39:04,954 DEBUG [RS:0;791f12959b23:37741 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:39:04,954 INFO [RS:0;791f12959b23:37741 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T09:39:04,954 INFO [RS:0;791f12959b23:37741 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T09:39:04,955 INFO [RS:0;791f12959b23:37741 {}] hbase.ChoreService(370): Chore service for: regionserver/791f12959b23:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T09:39:04,955 INFO [RS:0;791f12959b23:37741 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T09:39:04,955 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
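The repeated "wal.FSHLog$SyncRunner(477): interrupted" lines above are the WAL sync worker threads reporting that they were interrupted during shutdown and exited. As a generic illustration only (this is not HBase's SyncRunner, just the standard Java worker-interrupt pattern such messages usually reflect), a loop that shuts down cleanly on interrupt looks like this:

    public class InterruptibleWorker implements Runnable {
        @Override
        public void run() {
            try {
                while (!Thread.currentThread().isInterrupted()) {
                    // stand-in for waiting on the next sync request
                    Thread.sleep(100);
                }
            } catch (InterruptedException e) {
                // Restore the flag and report, mirroring the "interrupted" INFO lines above.
                Thread.currentThread().interrupt();
                System.out.println("sync worker: interrupted, exiting");
            }
        }

        public static void main(String[] args) throws InterruptedException {
            Thread t = new Thread(new InterruptibleWorker(), "sync.0");
            t.start();
            Thread.sleep(250);   // let it run briefly
            t.interrupt();       // request shutdown, as the region server close path does here
            t.join();
        }
    }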
2024-11-15T09:39:04,955 INFO [RS:0;791f12959b23:37741 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37741 2024-11-15T09:39:04,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/791f12959b23,37741,1731663506439 2024-11-15T09:39:04,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T09:39:04,959 INFO [RS:0;791f12959b23:37741 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T09:39:04,970 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [791f12959b23,37741,1731663506439] 2024-11-15T09:39:04,980 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/791f12959b23,37741,1731663506439 already deleted, retry=false 2024-11-15T09:39:04,980 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 791f12959b23,37741,1731663506439 expired; onlineServers=0 2024-11-15T09:39:04,980 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '791f12959b23,35857,1731663506251' ***** 2024-11-15T09:39:04,980 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T09:39:04,980 INFO [M:0;791f12959b23:35857 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T09:39:04,980 INFO [M:0;791f12959b23:35857 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T09:39:04,980 DEBUG [M:0;791f12959b23:35857 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T09:39:04,981 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
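The ZKWatcher and RegionServerTracker lines above show the mechanism behind region server expiration: the server's ephemeral znode under /hbase/rs disappears when its ZooKeeper session ends, and the watcher on the master side receives NodeDeleted and processes the expiration. Below is a minimal sketch of that mechanism using the plain org.apache.zookeeper client API; it is not the HBase ZKWatcher implementation, the quorum address and znode path are copied from the log purely for illustration, and it assumes a reachable ZooKeeper ensemble plus the zookeeper client jar on the classpath.

    import java.util.concurrent.CountDownLatch;

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralNodeWatch {

        // Connect and block until the session reaches SyncConnected.
        static ZooKeeper connect(String quorum) throws Exception {
            CountDownLatch connected = new CountDownLatch(1);
            ZooKeeper zk = new ZooKeeper(quorum, 30000, event -> {
                if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                    connected.countDown();
                }
            });
            connected.await();
            return zk;
        }

        public static void main(String[] args) throws Exception {
            String quorum = "127.0.0.1:54078";                          // from the log, illustrative only
            String path = "/hbase/rs/791f12959b23,37741,1731663506439"; // from the log, illustrative only

            ZooKeeper regionServer = connect(quorum);
            ZooKeeper master = connect(quorum);

            // The region server announces itself with an EPHEMERAL znode.
            regionServer.create(path, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

            // The master sets a one-shot watch on that znode.
            CountDownLatch deleted = new CountDownLatch(1);
            master.exists(path, event -> {
                if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
                    deleted.countDown();
                }
            });

            // Closing the region server's session removes the ephemeral node, which
            // delivers NodeDeleted to the watcher -- the expiration path seen above.
            regionServer.close();
            deleted.await();
            System.out.println("NodeDeleted for " + path + " -> process expiration");
            master.close();
        }
    }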
2024-11-15T09:39:04,981 DEBUG [M:0;791f12959b23:35857 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T09:39:04,981 DEBUG [master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663506803 {}] cleaner.HFileCleaner(306): Exit Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663506803,5,FailOnTimeoutGroup] 2024-11-15T09:39:04,981 DEBUG [master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663506808 {}] cleaner.HFileCleaner(306): Exit Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663506808,5,FailOnTimeoutGroup] 2024-11-15T09:39:04,981 INFO [M:0;791f12959b23:35857 {}] hbase.ChoreService(370): Chore service for: master/791f12959b23:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T09:39:04,981 INFO [M:0;791f12959b23:35857 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T09:39:04,981 DEBUG [M:0;791f12959b23:35857 {}] master.HMaster(1795): Stopping service threads 2024-11-15T09:39:04,981 INFO [M:0;791f12959b23:35857 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T09:39:04,981 INFO [M:0;791f12959b23:35857 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T09:39:04,981 INFO [M:0;791f12959b23:35857 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T09:39:04,981 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T09:39:05,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:05,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:39:05,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T09:39:05,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:39:05,065 DEBUG [M:0;791f12959b23:35857 {}] zookeeper.ZKUtil(347): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T09:39:05,065 WARN [M:0;791f12959b23:35857 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T09:39:05,066 INFO [M:0;791f12959b23:35857 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/.lastflushedseqids 2024-11-15T09:39:05,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:39:05,070 INFO [RS:0;791f12959b23:37741 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T09:39:05,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37741-0x1013ddc26e60001, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:39:05,070 INFO [RS:0;791f12959b23:37741 {}] regionserver.HRegionServer(1031): Exiting; stopping=791f12959b23,37741,1731663506439; zookeeper connection closed. 2024-11-15T09:39:05,071 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@70fe1114 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@70fe1114 2024-11-15T09:39:05,072 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T09:39:05,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741883_1059 (size=228) 2024-11-15T09:39:05,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741883_1059 (size=228) 2024-11-15T09:39:05,076 INFO [M:0;791f12959b23:35857 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T09:39:05,076 INFO [M:0;791f12959b23:35857 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T09:39:05,077 DEBUG [M:0;791f12959b23:35857 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T09:39:05,077 INFO [M:0;791f12959b23:35857 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:39:05,077 DEBUG [M:0;791f12959b23:35857 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
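The two RecoverLeaseFSUtils warnings above print "java.lang.reflect.InvocationTargetException: null" with the real problem ("Filesystem closed") visible only in the Caused by section. That shape is inherent to how java.lang.reflect.Method.invoke reports failures: it wraps whatever the target method threw, and the wrapper itself has no message. A self-contained illustration with plain JDK reflection (nothing HBase- or HDFS-specific; the method here is a stand-in):

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveCall {

        // Stand-in for the reflectively invoked isFileClosed(...) call the WARNs above are making.
        public static void mightFail() throws IOException {
            throw new IOException("Filesystem closed");
        }

        public static void main(String[] args) throws Exception {
            Method m = ReflectiveCall.class.getMethod("mightFail");
            try {
                m.invoke(null);   // static method, so the target instance is null
            } catch (InvocationTargetException e) {
                // The wrapper carries no message, hence "InvocationTargetException: null" in the log;
                // the information that matters is the wrapped cause.
                System.out.println("cause: " + e.getCause());   // java.io.IOException: Filesystem closed
            }
        }
    }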
2024-11-15T09:39:05,077 DEBUG [M:0;791f12959b23:35857 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T09:39:05,077 DEBUG [M:0;791f12959b23:35857 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:39:05,077 INFO [M:0;791f12959b23:35857 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.36 KB 2024-11-15T09:39:05,092 DEBUG [M:0;791f12959b23:35857 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5973b8c6680946b2b6aa046eaf0ea778 is 82, key is hbase:meta,,1/info:regioninfo/1731663507512/Put/seqid=0 2024-11-15T09:39:05,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741884_1060 (size=5672) 2024-11-15T09:39:05,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741884_1060 (size=5672) 2024-11-15T09:39:05,096 INFO [M:0;791f12959b23:35857 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5973b8c6680946b2b6aa046eaf0ea778 2024-11-15T09:39:05,114 DEBUG [M:0;791f12959b23:35857 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0b943aa6f32b4590b30b80cf275d3791 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731663508050/Put/seqid=0 2024-11-15T09:39:05,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741885_1061 (size=7089) 2024-11-15T09:39:05,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741885_1061 (size=7089) 2024-11-15T09:39:05,119 INFO [M:0;791f12959b23:35857 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0b943aa6f32b4590b30b80cf275d3791 2024-11-15T09:39:05,123 INFO [M:0;791f12959b23:35857 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0b943aa6f32b4590b30b80cf275d3791 2024-11-15T09:39:05,135 DEBUG [M:0;791f12959b23:35857 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fbe289b020eb424dafe8526248ecdb1a is 69, key is 791f12959b23,37741,1731663506439/rs:state/1731663506916/Put/seqid=0 2024-11-15T09:39:05,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741886_1062 (size=5156) 2024-11-15T09:39:05,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:35841 is added to blk_1073741886_1062 (size=5156) 2024-11-15T09:39:05,140 INFO [M:0;791f12959b23:35857 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fbe289b020eb424dafe8526248ecdb1a 2024-11-15T09:39:05,156 DEBUG [M:0;791f12959b23:35857 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/af751c6c77154caaa97cc7aaae4863cf is 52, key is load_balancer_on/state:d/1731663507674/Put/seqid=0 2024-11-15T09:39:05,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741887_1063 (size=5056) 2024-11-15T09:39:05,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741887_1063 (size=5056) 2024-11-15T09:39:05,161 INFO [M:0;791f12959b23:35857 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/af751c6c77154caaa97cc7aaae4863cf 2024-11-15T09:39:05,165 DEBUG [M:0;791f12959b23:35857 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5973b8c6680946b2b6aa046eaf0ea778 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5973b8c6680946b2b6aa046eaf0ea778 2024-11-15T09:39:05,169 INFO [M:0;791f12959b23:35857 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5973b8c6680946b2b6aa046eaf0ea778, entries=8, sequenceid=125, filesize=5.5 K 2024-11-15T09:39:05,170 DEBUG [M:0;791f12959b23:35857 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0b943aa6f32b4590b30b80cf275d3791 as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0b943aa6f32b4590b30b80cf275d3791 2024-11-15T09:39:05,173 INFO [M:0;791f12959b23:35857 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0b943aa6f32b4590b30b80cf275d3791 2024-11-15T09:39:05,174 INFO [M:0;791f12959b23:35857 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0b943aa6f32b4590b30b80cf275d3791, entries=13, sequenceid=125, filesize=6.9 K 2024-11-15T09:39:05,174 DEBUG [M:0;791f12959b23:35857 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fbe289b020eb424dafe8526248ecdb1a as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fbe289b020eb424dafe8526248ecdb1a 2024-11-15T09:39:05,178 INFO [M:0;791f12959b23:35857 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fbe289b020eb424dafe8526248ecdb1a, entries=1, sequenceid=125, filesize=5.0 K 2024-11-15T09:39:05,179 DEBUG [M:0;791f12959b23:35857 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/af751c6c77154caaa97cc7aaae4863cf as hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/af751c6c77154caaa97cc7aaae4863cf 2024-11-15T09:39:05,182 INFO [M:0;791f12959b23:35857 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38379/user/jenkins/test-data/80c44a99-e1db-cc74-86af-d70388797657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/af751c6c77154caaa97cc7aaae4863cf, entries=1, sequenceid=125, filesize=4.9 K 2024-11-15T09:39:05,183 INFO [M:0;791f12959b23:35857 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 106ms, sequenceid=125, compaction requested=false 2024-11-15T09:39:05,184 INFO [M:0;791f12959b23:35857 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:39:05,184 DEBUG [M:0;791f12959b23:35857 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731663545077Disabling compacts and flushes for region at 1731663545077Disabling writes for close at 1731663545077Obtaining lock to block concurrent updates at 1731663545077Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731663545077Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1731663545078 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731663545079 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731663545079Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731663545091 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731663545091Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731663545100 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731663545113 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731663545113Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731663545123 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731663545135 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731663545135Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731663545144 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731663545156 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731663545156Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a9ca244: reopening flushed file at 1731663545164 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@446e1f87: reopening flushed file at 1731663545169 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2ca5af3: reopening flushed file at 1731663545174 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@550e0ca6: reopening flushed file at 1731663545178 (+4 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 106ms, sequenceid=125, compaction requested=false at 1731663545183 (+5 ms)Writing region close event to WAL at 1731663545184 (+1 ms)Closed at 1731663545184 2024-11-15T09:39:05,185 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:05,185 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:05,185 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:05,185 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:05,185 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:05,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35603 is added to blk_1073741830_1006 (size=61320) 2024-11-15T09:39:05,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35841 is added to blk_1073741830_1006 (size=61320) 2024-11-15T09:39:05,187 INFO [M:0;791f12959b23:35857 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-15T09:39:05,187 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
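The master-store flush above ends with a summary entry of the form "Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 106ms, sequenceid=125, compaction requested=false". When comparing runs it can be useful to pull the exact byte counts and duration out of that message; the following is a small illustrative parser for exactly that shape, assuming nothing beyond the format shown here.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class FlushSummary {
        // dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, ... for <region> in 106ms, sequenceid=125
        private static final Pattern SUMMARY = Pattern.compile(
              "dataSize ~[^/]+/(?<data>\\d+), heapSize ~[^/]+/(?<heap>\\d+).*"
            + " for (?<region>\\w+) in (?<ms>\\d+)ms, sequenceid=(?<seq>\\d+)");

        public static void main(String[] args) {
            String msg = "Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, "
                       + "currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 106ms, "
                       + "sequenceid=125, compaction requested=false";
            Matcher m = SUMMARY.matcher(msg);
            if (m.find()) {
                System.out.printf("region=%s dataBytes=%s heapBytes=%s durationMs=%s seqId=%s%n",
                    m.group("region"), m.group("data"), m.group("heap"), m.group("ms"), m.group("seq"));
            }
        }
    }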
2024-11-15T09:39:05,187 INFO [M:0;791f12959b23:35857 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35857 2024-11-15T09:39:05,187 INFO [M:0;791f12959b23:35857 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T09:39:05,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:39:05,291 INFO [M:0;791f12959b23:35857 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T09:39:05,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35857-0x1013ddc26e60000, quorum=127.0.0.1:54078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:39:05,297 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3323ea67{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:39:05,298 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2fc2e7d1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:39:05,298 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:39:05,298 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d4c2da4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:39:05,298 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47c8059{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/hadoop.log.dir/,STOPPED} 2024-11-15T09:39:05,301 WARN [BP-253373432-172.17.0.2-1731663503792 heartbeating to localhost/127.0.0.1:38379 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:39:05,301 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
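From here the harness tears down the embedded HDFS web UIs: each "Stopped ServerConnector..." / "Stopped o.e.j.s.ServletContextHandler..." pair is Jetty stopping a connector and handler that were started when the minicluster came up. As a generic illustration of that lifecycle only (not the Hadoop/HBase HTTP server wiring), a minimal embedded Jetty 9 server is started and stopped like this:

    import org.eclipse.jetty.server.Server;
    import org.eclipse.jetty.server.handler.ContextHandler;

    public class EmbeddedJettyLifecycle {
        public static void main(String[] args) throws Exception {
            Server server = new Server(0);            // port 0 = ephemeral port, as in the "{localhost:0}" connectors above

            ContextHandler logs = new ContextHandler("/logs");
            server.setHandler(logs);

            server.start();                           // appears in logs as "Started ServerConnector..." / "Started o.e.j..."
            System.out.println("listening on " + server.getURI());

            server.stop();                            // appears in logs as "Stopped ServerConnector..." / "Stopped o.e.j..."
            server.join();
        }
    }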
2024-11-15T09:39:05,301 WARN [BP-253373432-172.17.0.2-1731663503792 heartbeating to localhost/127.0.0.1:38379 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-253373432-172.17.0.2-1731663503792 (Datanode Uuid 778bcdf5-dacb-4a27-abdc-69e3fd2cfb52) service to localhost/127.0.0.1:38379 2024-11-15T09:39:05,301 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:39:05,301 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/cluster_eb774146-251b-089f-6068-5cec19bf491c/data/data3/current/BP-253373432-172.17.0.2-1731663503792 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:39:05,301 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/cluster_eb774146-251b-089f-6068-5cec19bf491c/data/data4/current/BP-253373432-172.17.0.2-1731663503792 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:39:05,302 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:39:05,304 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b32401d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:39:05,304 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@60b9b83d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:39:05,304 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:39:05,304 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77b370f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:39:05,304 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37b300d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/hadoop.log.dir/,STOPPED} 2024-11-15T09:39:05,306 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T09:39:05,306 WARN [BP-253373432-172.17.0.2-1731663503792 heartbeating to localhost/127.0.0.1:38379 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:39:05,306 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:39:05,306 WARN [BP-253373432-172.17.0.2-1731663503792 heartbeating to localhost/127.0.0.1:38379 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-253373432-172.17.0.2-1731663503792 (Datanode Uuid 7ccca9ce-ce10-4210-b4d7-f84f5097cfbe) service to localhost/127.0.0.1:38379 2024-11-15T09:39:05,306 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/cluster_eb774146-251b-089f-6068-5cec19bf491c/data/data1/current/BP-253373432-172.17.0.2-1731663503792 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:39:05,306 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/cluster_eb774146-251b-089f-6068-5cec19bf491c/data/data2/current/BP-253373432-172.17.0.2-1731663503792 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:39:05,306 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:39:05,311 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@551592b1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T09:39:05,312 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e1b4695{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:39:05,312 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:39:05,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bc081d8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:39:05,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2996c87f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/hadoop.log.dir/,STOPPED} 2024-11-15T09:39:05,319 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T09:39:05,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T09:39:05,353 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=229 (was 205) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38379 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:38379 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38379 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38379 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38379 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38379 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38379 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38379 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=518 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=164 (was 204), ProcessCount=11 (was 11), AvailableMemoryMB=3917 (was 3125) - AvailableMemoryMB LEAK? 
- 2024-11-15T09:39:05,360 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=229, OpenFileDescriptor=518, MaxFileDescriptor=1048576, SystemLoadAverage=164, ProcessCount=11, AvailableMemoryMB=3917 2024-11-15T09:39:05,360 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T09:39:05,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/hadoop.log.dir so I do NOT create it in target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57 2024-11-15T09:39:05,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/871613f3-4971-f54f-5184-547f5dabb95e/hadoop.tmp.dir so I do NOT create it in target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57 2024-11-15T09:39:05,361 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/cluster_139d6b70-6422-75a3-ecb7-8c80714a4a9c, deleteOnExit=true 2024-11-15T09:39:05,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T09:39:05,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/test.cache.data in system properties and HBase conf 2024-11-15T09:39:05,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T09:39:05,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/hadoop.log.dir in system properties and HBase conf 2024-11-15T09:39:05,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T09:39:05,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T09:39:05,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T09:39:05,361 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-15T09:39:05,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T09:39:05,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T09:39:05,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T09:39:05,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T09:39:05,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T09:39:05,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T09:39:05,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T09:39:05,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T09:39:05,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T09:39:05,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/nfs.dump.dir in system properties and HBase conf 2024-11-15T09:39:05,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/java.io.tmpdir in system properties and HBase conf 2024-11-15T09:39:05,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T09:39:05,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T09:39:05,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T09:39:05,379 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T09:39:05,749 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:39:05,752 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:39:05,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:39:05,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:39:05,753 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:39:05,754 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:39:05,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@589971c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:39:05,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8035060{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:39:05,845 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@349fe29e{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/java.io.tmpdir/jetty-localhost-35257-hadoop-hdfs-3_4_1-tests_jar-_-any-12285307382551453588/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T09:39:05,845 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1d88d0b5{HTTP/1.1, (http/1.1)}{localhost:35257} 2024-11-15T09:39:05,845 INFO [Time-limited test {}] server.Server(415): Started @292278ms 2024-11-15T09:39:05,856 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T09:39:06,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:06,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:06,083 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:39:06,086 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:39:06,087 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:39:06,087 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:39:06,087 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:39:06,087 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4cb35637{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:39:06,087 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@591676f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:39:06,179 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@186b2d16{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/java.io.tmpdir/jetty-localhost-36511-hadoop-hdfs-3_4_1-tests_jar-_-any-7236179758712302050/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:39:06,180 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7e3ca285{HTTP/1.1, (http/1.1)}{localhost:36511} 2024-11-15T09:39:06,180 INFO [Time-limited test {}] server.Server(415): Started @292613ms 2024-11-15T09:39:06,181 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:39:06,226 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T09:39:06,229 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T09:39:06,229 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T09:39:06,229 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T09:39:06,229 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T09:39:06,230 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1dbd8c23{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/hadoop.log.dir/,AVAILABLE} 2024-11-15T09:39:06,230 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@734134af{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T09:39:06,322 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@16c97378{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/java.io.tmpdir/jetty-localhost-35133-hadoop-hdfs-3_4_1-tests_jar-_-any-4961315744518936581/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:39:06,323 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@61ab51b1{HTTP/1.1, (http/1.1)}{localhost:35133} 2024-11-15T09:39:06,323 INFO [Time-limited test {}] server.Server(415): Started @292756ms 2024-11-15T09:39:06,324 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T09:39:07,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:07,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T09:39:07,661 WARN [Thread-2502 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/cluster_139d6b70-6422-75a3-ecb7-8c80714a4a9c/data/data1/current/BP-1257620986-172.17.0.2-1731663545383/current, will proceed with Du for space computation calculation, 2024-11-15T09:39:07,661 WARN [Thread-2503 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/cluster_139d6b70-6422-75a3-ecb7-8c80714a4a9c/data/data2/current/BP-1257620986-172.17.0.2-1731663545383/current, will proceed with Du for space computation calculation, 2024-11-15T09:39:07,681 WARN [Thread-2466 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T09:39:07,683 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f2a8628cad48db2 with lease ID 0x780440db40a7bf5a: Processing first storage report for DS-1ad71036-f68c-4fdb-9b0b-7d7150836366 from datanode DatanodeRegistration(127.0.0.1:42195, datanodeUuid=17685e14-64ed-41fd-9281-a7def1f629e3, infoPort=43163, infoSecurePort=0, ipcPort=39035, storageInfo=lv=-57;cid=testClusterID;nsid=13684587;c=1731663545383) 2024-11-15T09:39:07,683 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f2a8628cad48db2 with lease ID 0x780440db40a7bf5a: from storage DS-1ad71036-f68c-4fdb-9b0b-7d7150836366 node DatanodeRegistration(127.0.0.1:42195, datanodeUuid=17685e14-64ed-41fd-9281-a7def1f629e3, infoPort=43163, infoSecurePort=0, ipcPort=39035, storageInfo=lv=-57;cid=testClusterID;nsid=13684587;c=1731663545383), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:39:07,683 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f2a8628cad48db2 with lease ID 0x780440db40a7bf5a: Processing first storage report for DS-c9dae553-5a2e-4bf3-96dc-2defdfb34c1d from datanode DatanodeRegistration(127.0.0.1:42195, datanodeUuid=17685e14-64ed-41fd-9281-a7def1f629e3, infoPort=43163, infoSecurePort=0, ipcPort=39035, storageInfo=lv=-57;cid=testClusterID;nsid=13684587;c=1731663545383) 2024-11-15T09:39:07,683 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f2a8628cad48db2 with lease ID 0x780440db40a7bf5a: from storage DS-c9dae553-5a2e-4bf3-96dc-2defdfb34c1d node DatanodeRegistration(127.0.0.1:42195, datanodeUuid=17685e14-64ed-41fd-9281-a7def1f629e3, infoPort=43163, infoSecurePort=0, ipcPort=39035, storageInfo=lv=-57;cid=testClusterID;nsid=13684587;c=1731663545383), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:39:07,807 WARN [Thread-2513 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/cluster_139d6b70-6422-75a3-ecb7-8c80714a4a9c/data/data3/current/BP-1257620986-172.17.0.2-1731663545383/current, will proceed with Du for space computation calculation, 2024-11-15T09:39:07,807 WARN [Thread-2514 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/cluster_139d6b70-6422-75a3-ecb7-8c80714a4a9c/data/data4/current/BP-1257620986-172.17.0.2-1731663545383/current, will proceed with Du for space computation calculation, 2024-11-15T09:39:07,824 WARN [Thread-2489 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T09:39:07,826 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbe0278004b5aa3c3 with lease ID 0x780440db40a7bf5b: Processing first storage report for DS-1a322ee4-6df3-485a-958b-daf2a369a603 from datanode DatanodeRegistration(127.0.0.1:39711, datanodeUuid=30856fbf-559c-48c0-91e7-faecca3e3558, infoPort=39329, infoSecurePort=0, ipcPort=35291, storageInfo=lv=-57;cid=testClusterID;nsid=13684587;c=1731663545383) 2024-11-15T09:39:07,826 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbe0278004b5aa3c3 with lease ID 0x780440db40a7bf5b: from storage DS-1a322ee4-6df3-485a-958b-daf2a369a603 node DatanodeRegistration(127.0.0.1:39711, datanodeUuid=30856fbf-559c-48c0-91e7-faecca3e3558, infoPort=39329, infoSecurePort=0, ipcPort=35291, storageInfo=lv=-57;cid=testClusterID;nsid=13684587;c=1731663545383), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:39:07,826 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbe0278004b5aa3c3 with lease ID 0x780440db40a7bf5b: Processing first storage report for DS-21c099db-d839-406f-bf21-92b819266a2d from datanode DatanodeRegistration(127.0.0.1:39711, datanodeUuid=30856fbf-559c-48c0-91e7-faecca3e3558, infoPort=39329, infoSecurePort=0, ipcPort=35291, storageInfo=lv=-57;cid=testClusterID;nsid=13684587;c=1731663545383) 2024-11-15T09:39:07,826 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbe0278004b5aa3c3 with lease ID 0x780440db40a7bf5b: from storage DS-21c099db-d839-406f-bf21-92b819266a2d node DatanodeRegistration(127.0.0.1:39711, datanodeUuid=30856fbf-559c-48c0-91e7-faecca3e3558, infoPort=39329, infoSecurePort=0, ipcPort=35291, storageInfo=lv=-57;cid=testClusterID;nsid=13684587;c=1731663545383), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T09:39:07,856 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57 2024-11-15T09:39:07,859 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/cluster_139d6b70-6422-75a3-ecb7-8c80714a4a9c/zookeeper_0, clientPort=50165, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/cluster_139d6b70-6422-75a3-ecb7-8c80714a4a9c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/cluster_139d6b70-6422-75a3-ecb7-8c80714a4a9c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T09:39:07,860 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50165 2024-11-15T09:39:07,860 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:39:07,861 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:39:07,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741825_1001 (size=7) 2024-11-15T09:39:07,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741825_1001 (size=7) 2024-11-15T09:39:07,870 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0 with version=8 2024-11-15T09:39:07,870 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35471/user/jenkins/test-data/762c6fef-8092-57c1-000f-88d77efbaee7/hbase-staging 2024-11-15T09:39:07,872 INFO [Time-limited test {}] client.ConnectionUtils(128): master/791f12959b23:0 server-side Connection retries=45 2024-11-15T09:39:07,872 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:39:07,872 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T09:39:07,872 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T09:39:07,872 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:39:07,872 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T09:39:07,872 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T09:39:07,872 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T09:39:07,873 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44059 2024-11-15T09:39:07,873 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44059 connecting to ZooKeeper ensemble=127.0.0.1:50165 2024-11-15T09:39:07,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:440590x0, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-15T09:39:07,952 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44059-0x1013ddcc97e0000 connected 2024-11-15T09:39:08,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:08,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:08,037 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:39:08,041 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:39:08,045 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:39:08,045 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0, hbase.cluster.distributed=false 2024-11-15T09:39:08,046 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T09:39:08,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44059 2024-11-15T09:39:08,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44059 2024-11-15T09:39:08,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44059 2024-11-15T09:39:08,048 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44059 2024-11-15T09:39:08,048 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44059 2024-11-15T09:39:08,062 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/791f12959b23:0 server-side Connection retries=45 2024-11-15T09:39:08,062 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:39:08,063 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T09:39:08,063 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T09:39:08,063 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T09:39:08,063 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T09:39:08,063 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T09:39:08,063 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T09:39:08,063 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36281 2024-11-15T09:39:08,064 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36281 connecting to ZooKeeper ensemble=127.0.0.1:50165 2024-11-15T09:39:08,064 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:39:08,066 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:39:08,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:362810x0, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T09:39:08,079 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:362810x0, quorum=127.0.0.1:50165, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:39:08,079 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36281-0x1013ddcc97e0001 connected 2024-11-15T09:39:08,079 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T09:39:08,080 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T09:39:08,081 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T09:39:08,082 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T09:39:08,083 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36281 2024-11-15T09:39:08,083 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36281 2024-11-15T09:39:08,083 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36281 2024-11-15T09:39:08,084 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36281 2024-11-15T09:39:08,084 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36281 2024-11-15T09:39:08,097 DEBUG [M:0;791f12959b23:44059 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;791f12959b23:44059 2024-11-15T09:39:08,098 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/791f12959b23,44059,1731663547871 2024-11-15T09:39:08,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:39:08,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:39:08,107 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/791f12959b23,44059,1731663547871 2024-11-15T09:39:08,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T09:39:08,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:39:08,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:39:08,118 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T09:39:08,118 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/791f12959b23,44059,1731663547871 from backup master directory 2024-11-15T09:39:08,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/backup-masters/791f12959b23,44059,1731663547871 2024-11-15T09:39:08,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:39:08,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T09:39:08,128 WARN [master/791f12959b23:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T09:39:08,128 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=791f12959b23,44059,1731663547871 2024-11-15T09:39:08,134 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/hbase.id] with ID: d97a6a11-0b96-4b5b-babc-22e16bde4fb1 2024-11-15T09:39:08,134 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/.tmp/hbase.id 2024-11-15T09:39:08,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741826_1002 (size=42) 2024-11-15T09:39:08,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741826_1002 (size=42) 2024-11-15T09:39:08,142 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/.tmp/hbase.id]:[hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/hbase.id] 2024-11-15T09:39:08,151 INFO [master/791f12959b23:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:39:08,151 INFO [master/791f12959b23:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T09:39:08,152 INFO [master/791f12959b23:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
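The util.FSUtils(620)/(625)/(634) entries above show the newly active master publishing its cluster ID: the ID is first written to .tmp/hbase.id under the root directory and then moved to its final hbase.id location. A minimal sketch of that write-to-temp-then-rename pattern against HDFS follows; it is illustrative only, the plain-text UUID payload stands in for the serialized ClusterId the real file holds, and the filesystem URI and paths are assumptions taken from this test run.

// Sketch: create-then-rename publication of a small marker file on HDFS,
// mirroring the "write to temporary location, then move" steps in the log above.
// Paths, URI and payload are illustrative assumptions, not HBase internals.
import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:42123"); // NameNode address seen in this log
    FileSystem fs = FileSystem.get(conf);

    Path rootDir = new Path("/user/jenkins/test-data/example-root"); // hypothetical root dir
    Path tmpId = new Path(rootDir, ".tmp/hbase.id");
    Path finalId = new Path(rootDir, "hbase.id");

    // Step 1: write the ID to a temporary file so readers never observe a partial write.
    try (FSDataOutputStream out = fs.create(tmpId, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }
    // Step 2: rename the temporary file into its final location.
    if (!fs.rename(tmpId, finalId)) {
      throw new java.io.IOException("rename failed: " + tmpId + " -> " + finalId);
    }
  }
}

In the log this appears as the paired "Write the cluster ID file to a temporary location" and "Move the temporary cluster ID file to its target location" messages, followed by the two addStoredBlock confirmations for the 42-byte block.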
2024-11-15T09:39:08,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:39:08,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:39:08,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741827_1003 (size=196) 2024-11-15T09:39:08,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741827_1003 (size=196) 2024-11-15T09:39:08,171 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T09:39:08,172 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T09:39:08,173 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:39:08,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741828_1004 (size=1189) 2024-11-15T09:39:08,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741828_1004 (size=1189) 2024-11-15T09:39:08,184 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store 2024-11-15T09:39:08,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741829_1005 (size=34) 2024-11-15T09:39:08,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741829_1005 (size=34) 2024-11-15T09:39:08,190 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:39:08,190 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T09:39:08,190 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:39:08,190 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:39:08,190 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T09:39:08,190 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:39:08,190 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
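The region.MasterRegion(370) and regionserver.HRegion(7590) entries above print the full descriptor used for the local 'master:store' region: four column families (info, proc, rs, state), with 'info' using ROW_INDEX_V1 block encoding, a ROWCOL bloom filter, in-memory caching, 8 KB blocks and 3 versions, while the other families use the plainer 64 KB / 1 version settings. As a rough reconstruction from those logged attributes (not the code MasterRegion actually runs), the same shape can be expressed with the public descriptor builder API; only 'info' and 'proc' are spelled out, since 'rs' and 'state' repeat the 'proc' settings under different names.

// Sketch: rebuilding the logged 'master:store' descriptor with the client builder API.
// Values mirror the attributes printed in the log entry above; this is illustrative only.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    // 'info': ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks, 3 versions.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setMaxVersions(3)
        .build();
    // 'proc' (and, per the log, 'rs' and 'state'): no encoding, ROW bloom, 64 KB blocks, 1 version.
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
        .setDataBlockEncoding(DataBlockEncoding.NONE)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(64 * 1024)
        .setMaxVersions(1)
        .build();
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}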
2024-11-15T09:39:08,190 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731663548190Disabling compacts and flushes for region at 1731663548190Disabling writes for close at 1731663548190Writing region close event to WAL at 1731663548190Closed at 1731663548190 2024-11-15T09:39:08,191 WARN [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/.initializing 2024-11-15T09:39:08,191 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/WALs/791f12959b23,44059,1731663547871 2024-11-15T09:39:08,193 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C44059%2C1731663547871, suffix=, logDir=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/WALs/791f12959b23,44059,1731663547871, archiveDir=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/oldWALs, maxLogs=10 2024-11-15T09:39:08,194 INFO [master/791f12959b23:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C44059%2C1731663547871.1731663548193 2024-11-15T09:39:08,201 INFO [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/WALs/791f12959b23,44059,1731663547871/791f12959b23%2C44059%2C1731663547871.1731663548193 2024-11-15T09:39:08,204 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43163:43163),(127.0.0.1/127.0.0.1:39329:39329)] 2024-11-15T09:39:08,208 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T09:39:08,209 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:39:08,209 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:39:08,209 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:39:08,210 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:39:08,211 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T09:39:08,212 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:39:08,212 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:39:08,212 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:39:08,213 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T09:39:08,213 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:39:08,213 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:39:08,213 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:39:08,214 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T09:39:08,214 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:39:08,214 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:39:08,214 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:39:08,215 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T09:39:08,215 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:39:08,216 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T09:39:08,216 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:39:08,217 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:39:08,217 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:39:08,218 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:39:08,218 DEBUG [master/791f12959b23:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:39:08,219 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T09:39:08,220 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T09:39:08,224 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T09:39:08,224 INFO [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=743500, jitterRate=-0.05459098517894745}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T09:39:08,225 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731663548209Initializing all the Stores at 1731663548210 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663548210Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663548210Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663548210Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663548210Cleaning up temporary data from old regions at 1731663548218 (+8 ms)Region opened successfully at 1731663548225 (+7 ms) 2024-11-15T09:39:08,225 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T09:39:08,228 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ce58501, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=791f12959b23/172.17.0.2:0 2024-11-15T09:39:08,229 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T09:39:08,229 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T09:39:08,229 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T09:39:08,229 INFO [master/791f12959b23:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T09:39:08,230 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T09:39:08,230 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T09:39:08,230 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T09:39:08,234 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T09:39:08,235 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T09:39:08,247 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T09:39:08,248 INFO [master/791f12959b23:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T09:39:08,248 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T09:39:08,258 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T09:39:08,258 INFO [master/791f12959b23:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T09:39:08,259 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T09:39:08,268 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T09:39:08,269 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T09:39:08,279 DEBUG 
[master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T09:39:08,281 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T09:39:08,289 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T09:39:08,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T09:39:08,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T09:39:08,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:39:08,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:39:08,300 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=791f12959b23,44059,1731663547871, sessionid=0x1013ddcc97e0000, setting cluster-up flag (Was=false) 2024-11-15T09:39:08,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:39:08,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:39:08,352 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T09:39:08,354 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=791f12959b23,44059,1731663547871 2024-11-15T09:39:08,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:39:08,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:39:08,405 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T09:39:08,408 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=791f12959b23,44059,1731663547871 2024-11-15T09:39:08,411 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T09:39:08,414 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T09:39:08,414 INFO [master/791f12959b23:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T09:39:08,414 INFO [master/791f12959b23:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T09:39:08,415 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 791f12959b23,44059,1731663547871 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T09:39:08,417 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:39:08,417 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:39:08,417 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:39:08,417 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/791f12959b23:0, corePoolSize=5, maxPoolSize=5 2024-11-15T09:39:08,417 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/791f12959b23:0, corePoolSize=10, maxPoolSize=10 2024-11-15T09:39:08,417 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:39:08,417 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/791f12959b23:0, corePoolSize=2, maxPoolSize=2 2024-11-15T09:39:08,417 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/791f12959b23:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T09:39:08,418 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731663578418 2024-11-15T09:39:08,418 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T09:39:08,418 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T09:39:08,418 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T09:39:08,418 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T09:39:08,418 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T09:39:08,418 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T09:39:08,418 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T09:39:08,418 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:39:08,419 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T09:39:08,419 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T09:39:08,419 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T09:39:08,419 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T09:39:08,419 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T09:39:08,419 INFO [master/791f12959b23:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T09:39:08,419 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663548419,5,FailOnTimeoutGroup] 2024-11-15T09:39:08,419 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663548419,5,FailOnTimeoutGroup] 2024-11-15T09:39:08,419 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T09:39:08,420 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T09:39:08,420 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:39:08,420 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T09:39:08,420 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-15T09:39:08,420 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T09:39:08,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741831_1007 (size=1321) 2024-11-15T09:39:08,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741831_1007 (size=1321) 2024-11-15T09:39:08,427 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T09:39:08,427 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0 2024-11-15T09:39:08,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741832_1008 (size=32) 2024-11-15T09:39:08,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741832_1008 (size=32) 2024-11-15T09:39:08,435 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:39:08,436 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T09:39:08,437 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T09:39:08,437 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:39:08,438 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:39:08,438 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T09:39:08,439 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T09:39:08,439 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:39:08,439 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:39:08,439 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T09:39:08,440 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T09:39:08,440 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:39:08,440 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:39:08,440 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T09:39:08,441 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T09:39:08,441 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:39:08,442 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:39:08,442 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T09:39:08,442 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/data/hbase/meta/1588230740 2024-11-15T09:39:08,442 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/data/hbase/meta/1588230740 2024-11-15T09:39:08,443 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T09:39:08,443 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T09:39:08,444 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-15T09:39:08,445 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T09:39:08,446 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T09:39:08,446 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=820012, jitterRate=0.04269978404045105}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T09:39:08,447 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731663548435Initializing all the Stores at 1731663548436 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663548436Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663548436Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663548436Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663548436Cleaning up temporary data from old regions at 1731663548443 (+7 ms)Region opened successfully at 1731663548447 (+4 ms) 2024-11-15T09:39:08,447 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T09:39:08,447 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T09:39:08,447 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T09:39:08,447 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T09:39:08,447 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T09:39:08,447 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T09:39:08,447 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731663548447Disabling compacts and flushes for region at 1731663548447Disabling writes for close at 1731663548447Writing region close 
event to WAL at 1731663548447Closed at 1731663548447 2024-11-15T09:39:08,448 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:39:08,448 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T09:39:08,448 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T09:39:08,449 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T09:39:08,450 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T09:39:08,486 INFO [RS:0;791f12959b23:36281 {}] regionserver.HRegionServer(746): ClusterId : d97a6a11-0b96-4b5b-babc-22e16bde4fb1 2024-11-15T09:39:08,487 DEBUG [RS:0;791f12959b23:36281 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T09:39:08,498 DEBUG [RS:0;791f12959b23:36281 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T09:39:08,498 DEBUG [RS:0;791f12959b23:36281 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T09:39:08,512 DEBUG [RS:0;791f12959b23:36281 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T09:39:08,513 DEBUG [RS:0;791f12959b23:36281 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26e088de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=791f12959b23/172.17.0.2:0 2024-11-15T09:39:08,528 DEBUG [RS:0;791f12959b23:36281 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;791f12959b23:36281 2024-11-15T09:39:08,528 INFO [RS:0;791f12959b23:36281 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T09:39:08,528 INFO [RS:0;791f12959b23:36281 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T09:39:08,528 DEBUG [RS:0;791f12959b23:36281 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-15T09:39:08,529 INFO [RS:0;791f12959b23:36281 {}] regionserver.HRegionServer(2659): reportForDuty to master=791f12959b23,44059,1731663547871 with port=36281, startcode=1731663548062 2024-11-15T09:39:08,529 DEBUG [RS:0;791f12959b23:36281 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T09:39:08,530 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50333, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T09:39:08,531 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44059 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 791f12959b23,36281,1731663548062 2024-11-15T09:39:08,531 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44059 {}] master.ServerManager(517): Registering regionserver=791f12959b23,36281,1731663548062 2024-11-15T09:39:08,532 DEBUG [RS:0;791f12959b23:36281 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0 2024-11-15T09:39:08,532 DEBUG [RS:0;791f12959b23:36281 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42123 2024-11-15T09:39:08,532 DEBUG [RS:0;791f12959b23:36281 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T09:39:08,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T09:39:08,542 DEBUG [RS:0;791f12959b23:36281 {}] zookeeper.ZKUtil(111): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/791f12959b23,36281,1731663548062 2024-11-15T09:39:08,542 WARN [RS:0;791f12959b23:36281 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T09:39:08,542 INFO [RS:0;791f12959b23:36281 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:39:08,543 DEBUG [RS:0;791f12959b23:36281 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/WALs/791f12959b23,36281,1731663548062 2024-11-15T09:39:08,543 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [791f12959b23,36281,1731663548062] 2024-11-15T09:39:08,546 INFO [RS:0;791f12959b23:36281 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T09:39:08,547 INFO [RS:0;791f12959b23:36281 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T09:39:08,548 INFO [RS:0;791f12959b23:36281 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T09:39:08,548 INFO [RS:0;791f12959b23:36281 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-15T09:39:08,548 INFO [RS:0;791f12959b23:36281 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T09:39:08,549 INFO [RS:0;791f12959b23:36281 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T09:39:08,549 INFO [RS:0;791f12959b23:36281 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T09:39:08,549 DEBUG [RS:0;791f12959b23:36281 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:39:08,549 DEBUG [RS:0;791f12959b23:36281 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:39:08,549 DEBUG [RS:0;791f12959b23:36281 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:39:08,549 DEBUG [RS:0;791f12959b23:36281 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:39:08,549 DEBUG [RS:0;791f12959b23:36281 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:39:08,549 DEBUG [RS:0;791f12959b23:36281 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/791f12959b23:0, corePoolSize=2, maxPoolSize=2 2024-11-15T09:39:08,549 DEBUG [RS:0;791f12959b23:36281 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:39:08,549 DEBUG [RS:0;791f12959b23:36281 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:39:08,550 DEBUG [RS:0;791f12959b23:36281 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:39:08,550 DEBUG [RS:0;791f12959b23:36281 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:39:08,550 DEBUG [RS:0;791f12959b23:36281 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:39:08,550 DEBUG [RS:0;791f12959b23:36281 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/791f12959b23:0, corePoolSize=1, maxPoolSize=1 2024-11-15T09:39:08,550 DEBUG [RS:0;791f12959b23:36281 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/791f12959b23:0, corePoolSize=3, maxPoolSize=3 2024-11-15T09:39:08,550 DEBUG [RS:0;791f12959b23:36281 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/791f12959b23:0, corePoolSize=3, maxPoolSize=3 2024-11-15T09:39:08,550 INFO [RS:0;791f12959b23:36281 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-15T09:39:08,550 INFO [RS:0;791f12959b23:36281 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T09:39:08,550 INFO [RS:0;791f12959b23:36281 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:39:08,550 INFO [RS:0;791f12959b23:36281 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T09:39:08,550 INFO [RS:0;791f12959b23:36281 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T09:39:08,550 INFO [RS:0;791f12959b23:36281 {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,36281,1731663548062-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T09:39:08,565 INFO [RS:0;791f12959b23:36281 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T09:39:08,565 INFO [RS:0;791f12959b23:36281 {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,36281,1731663548062-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:39:08,565 INFO [RS:0;791f12959b23:36281 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:39:08,565 INFO [RS:0;791f12959b23:36281 {}] regionserver.Replication(171): 791f12959b23,36281,1731663548062 started 2024-11-15T09:39:08,577 INFO [RS:0;791f12959b23:36281 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:39:08,577 INFO [RS:0;791f12959b23:36281 {}] regionserver.HRegionServer(1482): Serving as 791f12959b23,36281,1731663548062, RpcServer on 791f12959b23/172.17.0.2:36281, sessionid=0x1013ddcc97e0001 2024-11-15T09:39:08,577 DEBUG [RS:0;791f12959b23:36281 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T09:39:08,577 DEBUG [RS:0;791f12959b23:36281 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 791f12959b23,36281,1731663548062 2024-11-15T09:39:08,577 DEBUG [RS:0;791f12959b23:36281 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '791f12959b23,36281,1731663548062' 2024-11-15T09:39:08,577 DEBUG [RS:0;791f12959b23:36281 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T09:39:08,578 DEBUG [RS:0;791f12959b23:36281 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T09:39:08,578 DEBUG [RS:0;791f12959b23:36281 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T09:39:08,578 DEBUG [RS:0;791f12959b23:36281 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T09:39:08,578 DEBUG [RS:0;791f12959b23:36281 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 791f12959b23,36281,1731663548062 2024-11-15T09:39:08,578 DEBUG [RS:0;791f12959b23:36281 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '791f12959b23,36281,1731663548062' 2024-11-15T09:39:08,578 DEBUG [RS:0;791f12959b23:36281 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T09:39:08,579 DEBUG 
[RS:0;791f12959b23:36281 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T09:39:08,579 DEBUG [RS:0;791f12959b23:36281 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T09:39:08,579 INFO [RS:0;791f12959b23:36281 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T09:39:08,579 INFO [RS:0;791f12959b23:36281 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T09:39:08,600 WARN [791f12959b23:44059 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-15T09:39:08,682 INFO [RS:0;791f12959b23:36281 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C36281%2C1731663548062, suffix=, logDir=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/WALs/791f12959b23,36281,1731663548062, archiveDir=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/oldWALs, maxLogs=32 2024-11-15T09:39:08,683 INFO [RS:0;791f12959b23:36281 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C36281%2C1731663548062.1731663548683 2024-11-15T09:39:08,692 INFO [RS:0;791f12959b23:36281 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/WALs/791f12959b23,36281,1731663548062/791f12959b23%2C36281%2C1731663548062.1731663548683 2024-11-15T09:39:08,694 DEBUG [RS:0;791f12959b23:36281 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39329:39329),(127.0.0.1/127.0.0.1:43163:43163)] 2024-11-15T09:39:08,850 DEBUG [791f12959b23:44059 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T09:39:08,851 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=791f12959b23,36281,1731663548062 2024-11-15T09:39:08,855 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 791f12959b23,36281,1731663548062, state=OPENING 2024-11-15T09:39:08,942 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T09:39:08,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:39:08,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:39:08,954 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T09:39:08,954 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:39:08,954 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:39:08,954 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=791f12959b23,36281,1731663548062}] 2024-11-15T09:39:09,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:09,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:09,108 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T09:39:09,113 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49763, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T09:39:09,120 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T09:39:09,120 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:39:09,121 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=791f12959b23%2C36281%2C1731663548062.meta, suffix=.meta, logDir=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/WALs/791f12959b23,36281,1731663548062, archiveDir=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/oldWALs, maxLogs=32 2024-11-15T09:39:09,122 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 791f12959b23%2C36281%2C1731663548062.meta.1731663549122.meta 2024-11-15T09:39:09,126 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/WALs/791f12959b23,36281,1731663548062/791f12959b23%2C36281%2C1731663548062.meta.1731663549122.meta 2024-11-15T09:39:09,130 DEBUG 
[RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39329:39329),(127.0.0.1/127.0.0.1:43163:43163)] 2024-11-15T09:39:09,132 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T09:39:09,133 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T09:39:09,133 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T09:39:09,133 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-15T09:39:09,133 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T09:39:09,133 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T09:39:09,133 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T09:39:09,133 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T09:39:09,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T09:39:09,135 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T09:39:09,135 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:39:09,135 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:39:09,136 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T09:39:09,136 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T09:39:09,136 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:39:09,136 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:39:09,137 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T09:39:09,137 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T09:39:09,137 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:39:09,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:39:09,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T09:39:09,138 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T09:39:09,138 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T09:39:09,139 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T09:39:09,139 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T09:39:09,139 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/data/hbase/meta/1588230740 2024-11-15T09:39:09,140 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/data/hbase/meta/1588230740 2024-11-15T09:39:09,141 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T09:39:09,141 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T09:39:09,141 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
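The store-opening entries above bring hbase:meta online with its four column families (info, ns, rep_barrier, table), each getting its own cache and compaction configuration and a DefaultStoreFileTracker. As a rough, hedged illustration (not code from this test run), the same family set can be read back through the standard client API; the connection bootstrap below assumes an hbase-site.xml pointing at a running cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class ListMetaFamilies {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes cluster config on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
          // Expect the families seen in the log above: info, ns, rep_barrier, table
          for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
            System.out.println(cf.getNameAsString());
          }
        }
      }
    }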
2024-11-15T09:39:09,142 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T09:39:09,143 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=773195, jitterRate=-0.016831934452056885}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T09:39:09,143 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T09:39:09,143 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731663549133Writing region info on filesystem at 1731663549133Initializing all the Stores at 1731663549134 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663549134Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663549134Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731663549134Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731663549134Cleaning up temporary data from old regions at 1731663549141 (+7 ms)Running coprocessor post-open hooks at 1731663549143 (+2 ms)Region opened successfully at 1731663549143 2024-11-15T09:39:09,144 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731663549107 2024-11-15T09:39:09,146 DEBUG [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T09:39:09,146 INFO [RS_OPEN_META-regionserver/791f12959b23:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T09:39:09,146 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=791f12959b23,36281,1731663548062 2024-11-15T09:39:09,147 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 791f12959b23,36281,1731663548062, state=OPEN 2024-11-15T09:39:09,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T09:39:09,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T09:39:09,191 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=791f12959b23,36281,1731663548062 2024-11-15T09:39:09,191 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:39:09,191 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T09:39:09,195 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T09:39:09,195 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=791f12959b23,36281,1731663548062 in 237 msec 2024-11-15T09:39:09,199 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T09:39:09,200 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 747 msec 2024-11-15T09:39:09,201 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T09:39:09,201 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T09:39:09,203 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T09:39:09,203 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=791f12959b23,36281,1731663548062, seqNum=-1] 2024-11-15T09:39:09,203 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T09:39:09,205 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56503, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T09:39:09,212 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 799 msec 2024-11-15T09:39:09,212 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731663549212, completionTime=-1 2024-11-15T09:39:09,212 INFO 
[master/791f12959b23:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T09:39:09,212 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T09:39:09,214 INFO [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T09:39:09,214 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731663609214 2024-11-15T09:39:09,214 INFO [master/791f12959b23:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731663669214 2024-11-15T09:39:09,214 INFO [master/791f12959b23:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-15T09:39:09,215 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,44059,1731663547871-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T09:39:09,215 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,44059,1731663547871-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:39:09,215 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,44059,1731663547871-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:39:09,215 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-791f12959b23:44059, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:39:09,215 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T09:39:09,215 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T09:39:09,217 DEBUG [master/791f12959b23:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T09:39:09,219 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.091sec 2024-11-15T09:39:09,219 INFO [master/791f12959b23:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T09:39:09,219 INFO [master/791f12959b23:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T09:39:09,219 INFO [master/791f12959b23:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T09:39:09,219 INFO [master/791f12959b23:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
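The chore registrations above (ClusterStatusChore, BalancerChore, CatalogJanitor, HbckChore, and so on) all follow the ScheduledChore/ChoreService pattern. The sketch below shows that pattern in isolation; it assumes the ScheduledChore(String, Stoppable, int) constructor and ChoreService.scheduleChore/shutdown are available in this HBase version, and the chore name and period are invented for illustration:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws Exception {
        // Trivial stopper for the sketch; real chores use the server's Stoppable.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");
        // Fires every 1000 ms until the stopper is stopped, mirroring the chores enabled above.
        ScheduledChore chore = new ScheduledChore("demo-chore", stopper, 1000) {
          @Override protected void chore() { System.out.println("chore tick"); }
        };
        service.scheduleChore(chore);
        Thread.sleep(3500);
        stopper.stop("done");
        service.shutdown();
      }
    }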
2024-11-15T09:39:09,220 INFO [master/791f12959b23:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T09:39:09,220 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,44059,1731663547871-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T09:39:09,220 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,44059,1731663547871-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T09:39:09,222 DEBUG [master/791f12959b23:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T09:39:09,222 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T09:39:09,223 INFO [master/791f12959b23:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=791f12959b23,44059,1731663547871-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T09:39:09,288 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@591c2c01, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:39:09,288 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 791f12959b23,44059,-1 for getting cluster id 2024-11-15T09:39:09,288 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T09:39:09,291 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd97a6a11-0b96-4b5b-babc-22e16bde4fb1' 2024-11-15T09:39:09,292 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T09:39:09,292 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d97a6a11-0b96-4b5b-babc-22e16bde4fb1" 2024-11-15T09:39:09,293 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@736f3774, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:39:09,293 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [791f12959b23,44059,-1] 2024-11-15T09:39:09,293 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T09:39:09,294 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:39:09,296 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32922, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T09:39:09,298 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13830cb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T09:39:09,299 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T09:39:09,300 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=791f12959b23,36281,1731663548062, seqNum=-1] 2024-11-15T09:39:09,301 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T09:39:09,302 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59622, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T09:39:09,303 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=791f12959b23,44059,1731663547871 2024-11-15T09:39:09,304 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T09:39:09,306 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T09:39:09,306 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T09:39:09,308 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/WALs/test.com,8080,1, archiveDir=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/oldWALs, maxLogs=32 2024-11-15T09:39:09,309 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731663549309 2024-11-15T09:39:09,314 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/WALs/test.com,8080,1/test.com%2C8080%2C1.1731663549309 2024-11-15T09:39:09,318 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43163:43163),(127.0.0.1/127.0.0.1:39329:39329)] 2024-11-15T09:39:09,321 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731663549321 2024-11-15T09:39:09,326 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:09,326 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:09,326 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:09,326 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:09,326 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:09,326 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/WALs/test.com,8080,1/test.com%2C8080%2C1.1731663549309 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/WALs/test.com,8080,1/test.com%2C8080%2C1.1731663549321 2024-11-15T09:39:09,327 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43163:43163),(127.0.0.1/127.0.0.1:39329:39329)] 2024-11-15T09:39:09,327 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/WALs/test.com,8080,1/test.com%2C8080%2C1.1731663549309 is not closed yet, will try archiving it next time 2024-11-15T09:39:09,328 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:09,328 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:09,328 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:09,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741835_1011 (size=93) 2024-11-15T09:39:09,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741835_1011 (size=93) 2024-11-15T09:39:09,328 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:09,328 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:09,329 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/WALs/test.com,8080,1/test.com%2C8080%2C1.1731663549309 to hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/oldWALs/test.com%2C8080%2C1.1731663549309 2024-11-15T09:39:09,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741836_1012 (size=93) 2024-11-15T09:39:09,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741836_1012 (size=93) 2024-11-15T09:39:09,737 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/oldWALs 2024-11-15T09:39:09,737 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731663549321) 2024-11-15T09:39:09,737 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T09:39:09,738 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
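The entries above are the core of the log-rolling scenario: a WAL for test.com,8080,1 is created through WALFactory/FSHLogProvider with blocksize=256 MB, rollsize=128 MB and maxLogs=32, rolled while still empty, and the superseded file is archived to oldWALs. A minimal sketch of driving the same machinery directly follows; WALFactory and rollWriter signatures vary across HBase versions, and the configuration keys and values shown are illustrative rather than taken from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.wal.WAL;
    import org.apache.hadoop.hbase.wal.WALFactory;

    public class WalRollSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regionserver.maxlogs", 32);                // cap on live WAL files, as logged above
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f); // roll size = block size * multiplier
        WALFactory wals = new WALFactory(conf, "test");               // factory id, analogous to test.com,8080,1
        RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf("demo")).build();
        WAL wal = wals.getWAL(region);
        wal.rollWriter();   // closes the current writer and opens a new WAL file, as in the roll above
        wals.close();       // closed WAL files are eventually moved to the oldWALs directory
      }
    }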
2024-11-15T09:39:09,738 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:39:09,738 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:39:09,739 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:39:09,739 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
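The call stack above shows that the connection is being closed from AbstractTestLogRolling.tearDown, which delegates to HBaseTestingUtil.shutdownMiniCluster. In JUnit 4 terms that teardown amounts to roughly the following sketch; the class and field names are placeholders, while the two utility calls are the ones visible in the stack trace:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class LogRollingTeardownSketch {
      // Hypothetical shared utility; the real test keeps an equivalent static field.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        TEST_UTIL.startMiniCluster();    // brings up the single-master, single-regionserver cluster seen above
      }

      @After
      public void tearDown() throws Exception {
        TEST_UTIL.shutdownMiniCluster(); // the call visible at HBaseTestingUtil.java:1020 in the stack trace
      }
    }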
2024-11-15T09:39:09,739 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T09:39:09,739 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1851891525, stopped=false 2024-11-15T09:39:09,739 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=791f12959b23,44059,1731663547871 2024-11-15T09:39:09,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,752 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,752 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,752 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,752 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,752 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,776 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,776 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,776 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,782 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:09,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T09:39:09,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T09:39:09,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:39:09,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:39:09,784 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T09:39:09,784 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T09:39:09,784 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:39:09,784 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:39:09,785 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:39:09,785 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T09:39:09,785 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '791f12959b23,36281,1731663548062' ***** 2024-11-15T09:39:09,785 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T09:39:09,785 INFO [RS:0;791f12959b23:36281 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T09:39:09,785 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T09:39:09,785 INFO [RS:0;791f12959b23:36281 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T09:39:09,785 INFO [RS:0;791f12959b23:36281 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T09:39:09,785 INFO [RS:0;791f12959b23:36281 {}] regionserver.HRegionServer(959): stopping server 791f12959b23,36281,1731663548062 2024-11-15T09:39:09,785 INFO [RS:0;791f12959b23:36281 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T09:39:09,785 INFO [RS:0;791f12959b23:36281 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;791f12959b23:36281. 2024-11-15T09:39:09,785 DEBUG [RS:0;791f12959b23:36281 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T09:39:09,785 DEBUG [RS:0;791f12959b23:36281 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:39:09,786 INFO [RS:0;791f12959b23:36281 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T09:39:09,786 INFO [RS:0;791f12959b23:36281 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T09:39:09,786 INFO [RS:0;791f12959b23:36281 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-15T09:39:09,786 INFO [RS:0;791f12959b23:36281 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T09:39:09,786 INFO [RS:0;791f12959b23:36281 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-15T09:39:09,786 DEBUG [RS:0;791f12959b23:36281 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-15T09:39:09,786 DEBUG [RS:0;791f12959b23:36281 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-15T09:39:09,786 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T09:39:09,786 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T09:39:09,786 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T09:39:09,786 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T09:39:09,786 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T09:39:09,786 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-15T09:39:09,800 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/data/hbase/meta/1588230740/.tmp/ns/c3c1b2b4344740f89f3c84a8dead2ce1 is 43, key is default/ns:d/1731663549205/Put/seqid=0 2024-11-15T09:39:09,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741837_1013 (size=5153) 2024-11-15T09:39:09,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741837_1013 (size=5153) 2024-11-15T09:39:09,804 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/data/hbase/meta/1588230740/.tmp/ns/c3c1b2b4344740f89f3c84a8dead2ce1 2024-11-15T09:39:09,809 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/data/hbase/meta/1588230740/.tmp/ns/c3c1b2b4344740f89f3c84a8dead2ce1 as hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/data/hbase/meta/1588230740/ns/c3c1b2b4344740f89f3c84a8dead2ce1 2024-11-15T09:39:09,814 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/data/hbase/meta/1588230740/ns/c3c1b2b4344740f89f3c84a8dead2ce1, entries=2, sequenceid=6, filesize=5.0 K 2024-11-15T09:39:09,815 INFO 
[RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false 2024-11-15T09:39:09,815 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T09:39:09,819 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T09:39:09,819 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T09:39:09,819 INFO [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T09:39:09,820 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731663549786Running coprocessor pre-close hooks at 1731663549786Disabling compacts and flushes for region at 1731663549786Disabling writes for close at 1731663549786Obtaining lock to block concurrent updates at 1731663549786Preparing flush snapshotting stores in 1588230740 at 1731663549786Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731663549786Flushing stores of hbase:meta,,1.1588230740 at 1731663549787 (+1 ms)Flushing 1588230740/ns: creating writer at 1731663549787Flushing 1588230740/ns: appending metadata at 1731663549799 (+12 ms)Flushing 1588230740/ns: closing flushed file at 1731663549799Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a588724: reopening flushed file at 1731663549808 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false at 1731663549815 (+7 ms)Writing region close event to WAL at 1731663549816 (+1 ms)Running coprocessor post-close hooks at 1731663549819 (+3 ms)Closed at 1731663549819 2024-11-15T09:39:09,820 DEBUG [RS_CLOSE_META-regionserver/791f12959b23:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T09:39:09,986 INFO [RS:0;791f12959b23:36281 {}] regionserver.HRegionServer(976): stopping server 791f12959b23,36281,1731663548062; all regions closed. 
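Closing hbase:meta above first flushes its 74 B memstore into a ~5.0 K HFile under data/hbase/meta/1588230740/ns/ and only then writes the region close marker. Outside of a shutdown, the same memstore-to-HFile flush can be requested through the Admin API; a minimal sketch, assuming a reachable cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushMetaSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Forces the meta region's memstores out to HFiles,
          // the same operation the close path performs in the log above.
          admin.flush(TableName.META_TABLE_NAME);
        }
      }
    }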
2024-11-15T09:39:09,987 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:09,987 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:09,987 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:09,988 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:09,988 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:09,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741834_1010 (size=1152) 2024-11-15T09:39:09,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741834_1010 (size=1152) 2024-11-15T09:39:09,997 DEBUG [RS:0;791f12959b23:36281 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/oldWALs 2024-11-15T09:39:09,997 INFO [RS:0;791f12959b23:36281 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 791f12959b23%2C36281%2C1731663548062.meta:.meta(num 1731663549122) 2024-11-15T09:39:09,998 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:09,998 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:09,999 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:09,999 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:09,999 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:10,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741833_1009 (size=93) 2024-11-15T09:39:10,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741833_1009 (size=93) 2024-11-15T09:39:10,004 DEBUG [RS:0;791f12959b23:36281 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/oldWALs 2024-11-15T09:39:10,004 INFO [RS:0;791f12959b23:36281 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 791f12959b23%2C36281%2C1731663548062:(num 1731663548683) 2024-11-15T09:39:10,004 DEBUG [RS:0;791f12959b23:36281 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T09:39:10,004 INFO [RS:0;791f12959b23:36281 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T09:39:10,004 INFO [RS:0;791f12959b23:36281 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T09:39:10,005 INFO [RS:0;791f12959b23:36281 {}] hbase.ChoreService(370): Chore service for: regionserver/791f12959b23:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-15T09:39:10,005 INFO [RS:0;791f12959b23:36281 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T09:39:10,005 INFO [regionserver/791f12959b23:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T09:39:10,005 INFO [RS:0;791f12959b23:36281 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36281 2024-11-15T09:39:10,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T09:39:10,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/791f12959b23,36281,1731663548062 2024-11-15T09:39:10,016 INFO [RS:0;791f12959b23:36281 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T09:39:10,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,41295,1731663366547/791f12959b23%2C41295%2C1731663366547.1731663366900 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:10,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42831/user/jenkins/test-data/38796710-7f2b-33fd-15a5-949fa3bcde54/WALs/791f12959b23,46093,1731663365267/791f12959b23%2C46093%2C1731663365267.meta.1731663366363.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T09:39:10,026 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [791f12959b23,36281,1731663548062] 2024-11-15T09:39:10,036 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/791f12959b23,36281,1731663548062 already deleted, retry=false 2024-11-15T09:39:10,037 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 791f12959b23,36281,1731663548062 expired; onlineServers=0 2024-11-15T09:39:10,037 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '791f12959b23,44059,1731663547871' ***** 2024-11-15T09:39:10,037 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T09:39:10,037 INFO [M:0;791f12959b23:44059 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T09:39:10,037 INFO [M:0;791f12959b23:44059 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T09:39:10,037 DEBUG [M:0;791f12959b23:44059 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T09:39:10,037 DEBUG [M:0;791f12959b23:44059 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T09:39:10,037 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
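The two "Failed invocation" warnings above wrap the real failure, java.io.IOException: Filesystem closed, inside a java.lang.reflect.InvocationTargetException, because RecoverLeaseFSUtils calls isFileClosed reflectively. The sketch below shows only the standard JDK pattern for surfacing the underlying cause of such a reflective call; it is illustrative and is not the HBase implementation.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveCall {
    // Invokes a public no-arg method reflectively and rethrows the real cause,
    // so callers see the underlying IOException rather than InvocationTargetException.
    static Object call(Object target, String methodName) throws Exception {
        Method m = target.getClass().getMethod(methodName);
        try {
            return m.invoke(target);
        } catch (InvocationTargetException e) {
            Throwable cause = e.getCause();   // the exception thrown inside the invoked method
            throw (cause instanceof Exception) ? (Exception) cause : e;
        }
    }
}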
2024-11-15T09:39:10,037 DEBUG [master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663548419 {}] cleaner.HFileCleaner(306): Exit Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.small.0-1731663548419,5,FailOnTimeoutGroup] 2024-11-15T09:39:10,037 DEBUG [master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663548419 {}] cleaner.HFileCleaner(306): Exit Thread[master/791f12959b23:0:becomeActiveMaster-HFileCleaner.large.0-1731663548419,5,FailOnTimeoutGroup] 2024-11-15T09:39:10,037 INFO [M:0;791f12959b23:44059 {}] hbase.ChoreService(370): Chore service for: master/791f12959b23:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T09:39:10,037 INFO [M:0;791f12959b23:44059 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T09:39:10,037 DEBUG [M:0;791f12959b23:44059 {}] master.HMaster(1795): Stopping service threads 2024-11-15T09:39:10,037 INFO [M:0;791f12959b23:44059 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T09:39:10,037 INFO [M:0;791f12959b23:44059 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T09:39:10,037 INFO [M:0;791f12959b23:44059 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T09:39:10,038 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T09:39:10,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T09:39:10,047 DEBUG [M:0;791f12959b23:44059 {}] zookeeper.ZKUtil(347): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T09:39:10,047 WARN [M:0;791f12959b23:44059 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T09:39:10,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T09:39:10,048 INFO [M:0;791f12959b23:44059 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/.lastflushedseqids 2024-11-15T09:39:10,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741838_1014 (size=99) 2024-11-15T09:39:10,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741838_1014 (size=99) 2024-11-15T09:39:10,055 INFO [M:0;791f12959b23:44059 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T09:39:10,055 INFO [M:0;791f12959b23:44059 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T09:39:10,056 DEBUG [M:0;791f12959b23:44059 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T09:39:10,056 INFO [M:0;791f12959b23:44059 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:39:10,056 DEBUG [M:0;791f12959b23:44059 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:39:10,056 DEBUG [M:0;791f12959b23:44059 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T09:39:10,056 DEBUG [M:0;791f12959b23:44059 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:39:10,056 INFO [M:0;791f12959b23:44059 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-15T09:39:10,074 DEBUG [M:0;791f12959b23:44059 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9f19972771fd41aa8ac42d4d89272e83 is 82, key is hbase:meta,,1/info:regioninfo/1731663549146/Put/seqid=0 2024-11-15T09:39:10,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741839_1015 (size=5672) 2024-11-15T09:39:10,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741839_1015 (size=5672) 2024-11-15T09:39:10,078 INFO [M:0;791f12959b23:44059 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9f19972771fd41aa8ac42d4d89272e83 2024-11-15T09:39:10,095 DEBUG [M:0;791f12959b23:44059 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/791a205c2ee34575b50c9ab62a3a1a27 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731663549211/Put/seqid=0 2024-11-15T09:39:10,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741840_1016 (size=5275) 2024-11-15T09:39:10,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741840_1016 (size=5275) 2024-11-15T09:39:10,099 INFO [M:0;791f12959b23:44059 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/791a205c2ee34575b50c9ab62a3a1a27 2024-11-15T09:39:10,118 DEBUG [M:0;791f12959b23:44059 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/41679d585e0a462fb390b294eae6d41a is 69, key is 791f12959b23,36281,1731663548062/rs:state/1731663548531/Put/seqid=0 2024-11-15T09:39:10,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741841_1017 (size=5156) 2024-11-15T09:39:10,122 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741841_1017 (size=5156) 2024-11-15T09:39:10,123 INFO [M:0;791f12959b23:44059 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/41679d585e0a462fb390b294eae6d41a 2024-11-15T09:39:10,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:39:10,126 INFO [RS:0;791f12959b23:36281 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T09:39:10,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36281-0x1013ddcc97e0001, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:39:10,126 INFO [RS:0;791f12959b23:36281 {}] regionserver.HRegionServer(1031): Exiting; stopping=791f12959b23,36281,1731663548062; zookeeper connection closed. 2024-11-15T09:39:10,127 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7f0e32a1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7f0e32a1 2024-11-15T09:39:10,127 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T09:39:10,141 DEBUG [M:0;791f12959b23:44059 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/88ad4a54a48246b9a311cc1f9835c34d is 52, key is load_balancer_on/state:d/1731663549305/Put/seqid=0 2024-11-15T09:39:10,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741842_1018 (size=5056) 2024-11-15T09:39:10,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741842_1018 (size=5056) 2024-11-15T09:39:10,146 INFO [M:0;791f12959b23:44059 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/88ad4a54a48246b9a311cc1f9835c34d 2024-11-15T09:39:10,150 DEBUG [M:0;791f12959b23:44059 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9f19972771fd41aa8ac42d4d89272e83 as hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9f19972771fd41aa8ac42d4d89272e83 2024-11-15T09:39:10,154 INFO [M:0;791f12959b23:44059 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9f19972771fd41aa8ac42d4d89272e83, entries=8, sequenceid=29, filesize=5.5 K 2024-11-15T09:39:10,155 DEBUG [M:0;791f12959b23:44059 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/791a205c2ee34575b50c9ab62a3a1a27 as hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/791a205c2ee34575b50c9ab62a3a1a27 2024-11-15T09:39:10,158 INFO [M:0;791f12959b23:44059 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/791a205c2ee34575b50c9ab62a3a1a27, entries=3, sequenceid=29, filesize=5.2 K 2024-11-15T09:39:10,159 DEBUG [M:0;791f12959b23:44059 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/41679d585e0a462fb390b294eae6d41a as hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/41679d585e0a462fb390b294eae6d41a 2024-11-15T09:39:10,163 INFO [M:0;791f12959b23:44059 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/41679d585e0a462fb390b294eae6d41a, entries=1, sequenceid=29, filesize=5.0 K 2024-11-15T09:39:10,164 DEBUG [M:0;791f12959b23:44059 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/88ad4a54a48246b9a311cc1f9835c34d as hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/88ad4a54a48246b9a311cc1f9835c34d 2024-11-15T09:39:10,167 INFO [M:0;791f12959b23:44059 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42123/user/jenkins/test-data/1088c8ab-5d88-44fa-4bec-0e195a18ace0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/88ad4a54a48246b9a311cc1f9835c34d, entries=1, sequenceid=29, filesize=4.9 K 2024-11-15T09:39:10,168 INFO [M:0;791f12959b23:44059 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=29, compaction requested=false 2024-11-15T09:39:10,170 INFO [M:0;791f12959b23:44059 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T09:39:10,170 DEBUG [M:0;791f12959b23:44059 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731663550056Disabling compacts and flushes for region at 1731663550056Disabling writes for close at 1731663550056Obtaining lock to block concurrent updates at 1731663550056Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731663550056Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731663550056Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731663550057 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731663550057Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731663550074 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731663550074Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731663550082 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731663550095 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731663550095Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731663550103 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731663550117 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731663550117Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731663550127 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731663550141 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731663550141Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6334792e: reopening flushed file at 1731663550149 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f2ead7e: reopening flushed file at 1731663550154 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8c470c8: reopening flushed file at 1731663550158 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@425b7fc7: reopening flushed file at 1731663550163 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=29, compaction requested=false at 1731663550168 (+5 ms)Writing region close event to WAL at 1731663550170 (+2 ms)Closed at 1731663550170 2024-11-15T09:39:10,170 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:10,170 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:10,170 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:10,170 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:10,170 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T09:39:10,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39711 is added to blk_1073741830_1006 (size=10311) 2024-11-15T09:39:10,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42195 is added to blk_1073741830_1006 (size=10311) 2024-11-15T09:39:10,173 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T09:39:10,173 INFO [M:0;791f12959b23:44059 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-15T09:39:10,173 INFO [M:0;791f12959b23:44059 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44059 2024-11-15T09:39:10,173 INFO [M:0;791f12959b23:44059 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T09:39:10,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:39:10,284 INFO [M:0;791f12959b23:44059 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T09:39:10,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44059-0x1013ddcc97e0000, quorum=127.0.0.1:50165, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T09:39:10,287 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T09:39:10,289 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@16c97378{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:39:10,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,289 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@61ab51b1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:39:10,289 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:39:10,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,290 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@734134af{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:39:10,290 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1dbd8c23{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/hadoop.log.dir/,STOPPED} 2024-11-15T09:39:10,290 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,291 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,292 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,293 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,296 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,296 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,296 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,297 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,319 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,324 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,324 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,324 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T09:39:10,333 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T09:39:10,333 WARN [BP-1257620986-172.17.0.2-1731663545383 heartbeating to localhost/127.0.0.1:42123 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:39:10,333 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
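The repeated 'Cannot invoke "java.util.Map.values()" because "this.executors" is null' warnings above indicate a metrics collector reading an executors map after shutdown has already cleared it. The sketch below shows a generic snapshot-the-reference guard for that kind of race; the class and field names are hypothetical and this is not the Hadoop code.

import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ExecutorService;

public class MetricsSnapshot {
    // Hypothetical field standing in for the "this.executors" the warnings refer to.
    private volatile Map<String, ExecutorService> executors = Collections.emptyMap();

    void shutdown() {
        executors = null;   // the write that makes a later metric collection throw NPE
    }

    int executorCount() {
        Map<String, ExecutorService> snapshot = executors;   // read the volatile field once
        if (snapshot == null) {
            return 0;   // collector ran after shutdown; report nothing instead of throwing
        }
        return snapshot.size();
    }
}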
2024-11-15T09:39:10,333 WARN [BP-1257620986-172.17.0.2-1731663545383 heartbeating to localhost/127.0.0.1:42123 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1257620986-172.17.0.2-1731663545383 (Datanode Uuid 30856fbf-559c-48c0-91e7-faecca3e3558) service to localhost/127.0.0.1:42123 2024-11-15T09:39:10,333 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:39:10,333 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T09:39:10,333 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T09:39:10,333 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-15T09:39:10,333 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/cluster_139d6b70-6422-75a3-ecb7-8c80714a4a9c/data/data3/current/BP-1257620986-172.17.0.2-1731663545383 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:39:10,333 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/cluster_139d6b70-6422-75a3-ecb7-8c80714a4a9c/data/data4/current/BP-1257620986-172.17.0.2-1731663545383 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:39:10,334 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:39:10,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@186b2d16{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T09:39:10,336 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7e3ca285{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:39:10,336 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:39:10,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@591676f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:39:10,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4cb35637{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/hadoop.log.dir/,STOPPED} 2024-11-15T09:39:10,337 WARN [BP-1257620986-172.17.0.2-1731663545383 heartbeating to localhost/127.0.0.1:42123 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T09:39:10,337 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor 
encountered interrupt and exit. 2024-11-15T09:39:10,337 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T09:39:10,337 WARN [BP-1257620986-172.17.0.2-1731663545383 heartbeating to localhost/127.0.0.1:42123 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1257620986-172.17.0.2-1731663545383 (Datanode Uuid 17685e14-64ed-41fd-9281-a7def1f629e3) service to localhost/127.0.0.1:42123 2024-11-15T09:39:10,338 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/cluster_139d6b70-6422-75a3-ecb7-8c80714a4a9c/data/data1/current/BP-1257620986-172.17.0.2-1731663545383 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:39:10,338 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/cluster_139d6b70-6422-75a3-ecb7-8c80714a4a9c/data/data2/current/BP-1257620986-172.17.0.2-1731663545383 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T09:39:10,338 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T09:39:10,342 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@349fe29e{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T09:39:10,343 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1d88d0b5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T09:39:10,343 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T09:39:10,343 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8035060{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T09:39:10,343 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@589971c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/967f8e5d-45b2-f609-ed41-10486d0afc57/hadoop.log.dir/,STOPPED} 2024-11-15T09:39:10,348 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T09:39:10,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T09:39:10,369 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=269 (was 229) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:42123 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42123 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42123 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42123 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42123 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42123 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42123 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42123 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=534 (was 518) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=159 (was 164), ProcessCount=11 (was 11), AvailableMemoryMB=3916 (was 3917)
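The ResourceChecker summary above compares thread and file-descriptor counts taken before and after the test, e.g. Thread=269 (was 229). A minimal sketch of the same before/after comparison using the standard ThreadMXBean follows; it is illustrative only and is not HBase's ResourceChecker.

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;

public class ThreadLeakCheck {
    public static void main(String[] args) {
        ThreadMXBean threads = ManagementFactory.getThreadMXBean();
        int before = threads.getThreadCount();

        // ... run the test body here ...

        int after = threads.getThreadCount();
        System.out.printf("Thread=%d (was %d)%s%n",
            after, before, after > before ? " - Thread LEAK? -" : "");
    }
}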