2024-11-23 19:34:48,217 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-23 19:34:48,234 main DEBUG Took 0.014588 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-23 19:34:48,235 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-23 19:34:48,235 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-23 19:34:48,237 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-23 19:34:48,239 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 19:34:48,248 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-23 19:34:48,265 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 19:34:48,267 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 19:34:48,268 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 19:34:48,268 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 19:34:48,269 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 19:34:48,270 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 19:34:48,271 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 19:34:48,272 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 19:34:48,272 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 19:34:48,273 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 19:34:48,274 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 19:34:48,275 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 19:34:48,276 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 19:34:48,276 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-23 19:34:48,277 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 19:34:48,278 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 19:34:48,278 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 19:34:48,279 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 19:34:48,280 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 19:34:48,280 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 19:34:48,281 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 19:34:48,281 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 19:34:48,282 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 19:34:48,283 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 19:34:48,283 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 19:34:48,284 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-23 19:34:48,286 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 19:34:48,287 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-23 19:34:48,290 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-23 19:34:48,291 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-23 19:34:48,293 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-23 19:34:48,293 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-23 19:34:48,305 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-23 19:34:48,309 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-23 19:34:48,312 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-23 19:34:48,312 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-23 19:34:48,313 main DEBUG createAppenders(={Console}) 2024-11-23 19:34:48,314 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-23 19:34:48,315 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-23 19:34:48,315 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-23 19:34:48,316 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-23 19:34:48,317 main DEBUG OutputStream closed 2024-11-23 19:34:48,317 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-23 19:34:48,318 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-23 19:34:48,318 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-23 19:34:48,393 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-23 19:34:48,396 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-23 19:34:48,397 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-23 19:34:48,398 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-23 19:34:48,398 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-23 19:34:48,399 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-23 19:34:48,399 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-23 19:34:48,399 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-23 19:34:48,400 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-23 19:34:48,400 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-23 19:34:48,400 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-23 19:34:48,401 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-23 19:34:48,401 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-23 19:34:48,402 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-23 19:34:48,402 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-23 19:34:48,402 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-23 19:34:48,403 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-23 19:34:48,403 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-23 19:34:48,406 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-23 19:34:48,406 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-23 19:34:48,406 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-23 19:34:48,407 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-23T19:34:48,656 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1 2024-11-23 19:34:48,658 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-23 19:34:48,659 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-23T19:34:48,667 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins
2024-11-23T19:34:48,702 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=899, ProcessCount=11, AvailableMemoryMB=4472
2024-11-23T19:34:48,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-23T19:34:48,721 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/cluster_4591f455-85e2-8b21-e172-08dc0300edb1, deleteOnExit=true
2024-11-23T19:34:48,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-23T19:34:48,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/test.cache.data in system properties and HBase conf
2024-11-23T19:34:48,725 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/hadoop.tmp.dir in system properties and HBase conf
2024-11-23T19:34:48,726 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/hadoop.log.dir in system properties and HBase conf
2024-11-23T19:34:48,727 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-23T19:34:48,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-23T19:34:48,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-23T19:34:48,829 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-23T19:34:48,924 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem.
Skipping on block location reordering 2024-11-23T19:34:48,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T19:34:48,931 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T19:34:48,932 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T19:34:48,932 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T19:34:48,933 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T19:34:48,934 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T19:34:48,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T19:34:48,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T19:34:48,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T19:34:48,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/nfs.dump.dir in system properties and HBase conf 2024-11-23T19:34:48,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/java.io.tmpdir in system properties and HBase conf 2024-11-23T19:34:48,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T19:34:48,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T19:34:48,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T19:34:49,564 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T19:34:50,213 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-23T19:34:50,310 INFO [Time-limited test {}] log.Log(170): Logging initialized @2840ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-23T19:34:50,402 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:34:50,466 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:34:50,492 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:34:50,492 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:34:50,494 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T19:34:50,510 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:34:50,515 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:34:50,516 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:34:50,759 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/java.io.tmpdir/jetty-localhost-40247-hadoop-hdfs-3_4_1-tests_jar-_-any-2268601707313385849/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T19:34:50,766 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:40247} 2024-11-23T19:34:50,766 INFO [Time-limited test {}] server.Server(415): Started @3297ms 2024-11-23T19:34:50,797 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T19:34:51,366 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:34:51,376 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:34:51,377 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:34:51,378 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:34:51,378 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T19:34:51,379 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:34:51,380 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:34:51,517 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/java.io.tmpdir/jetty-localhost-35447-hadoop-hdfs-3_4_1-tests_jar-_-any-8433395621398494362/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:34:51,518 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:35447} 2024-11-23T19:34:51,519 INFO [Time-limited test {}] server.Server(415): Started @4049ms 2024-11-23T19:34:51,571 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:34:51,703 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:34:51,711 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:34:51,728 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:34:51,729 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:34:51,729 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T19:34:51,731 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:34:51,732 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:34:51,888 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/java.io.tmpdir/jetty-localhost-37619-hadoop-hdfs-3_4_1-tests_jar-_-any-9568912282975483345/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:34:51,889 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:37619} 2024-11-23T19:34:51,890 INFO [Time-limited test {}] server.Server(415): Started @4420ms 2024-11-23T19:34:51,894 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-23T19:34:53,275 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/cluster_4591f455-85e2-8b21-e172-08dc0300edb1/data/data4/current/BP-601919595-172.17.0.3-1732390489657/current, will proceed with Du for space computation calculation,
2024-11-23T19:34:53,275 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/cluster_4591f455-85e2-8b21-e172-08dc0300edb1/data/data3/current/BP-601919595-172.17.0.3-1732390489657/current, will proceed with Du for space computation calculation,
2024-11-23T19:34:53,275 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/cluster_4591f455-85e2-8b21-e172-08dc0300edb1/data/data2/current/BP-601919595-172.17.0.3-1732390489657/current, will proceed with Du for space computation calculation,
2024-11-23T19:34:53,275 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/cluster_4591f455-85e2-8b21-e172-08dc0300edb1/data/data1/current/BP-601919595-172.17.0.3-1732390489657/current, will proceed with Du for space computation calculation,
2024-11-23T19:34:53,346 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-23T19:34:53,346 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1 2024-11-23T19:34:53,408 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2fa1c154e6fe1cb7 with lease ID 0x3fc354b1a5d073e4: Processing first storage report for DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8 from datanode DatanodeRegistration(127.0.0.1:39965, datanodeUuid=07684f41-35f0-44c2-aa43-e9f17a039632, infoPort=39605, infoSecurePort=0, ipcPort=35959, storageInfo=lv=-57;cid=testClusterID;nsid=1710603121;c=1732390489657) 2024-11-23T19:34:53,409 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2fa1c154e6fe1cb7 with lease ID 0x3fc354b1a5d073e4: from storage DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8 node DatanodeRegistration(127.0.0.1:39965, datanodeUuid=07684f41-35f0-44c2-aa43-e9f17a039632, infoPort=39605, infoSecurePort=0, ipcPort=35959, storageInfo=lv=-57;cid=testClusterID;nsid=1710603121;c=1732390489657), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-23T19:34:53,410 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6beea82269744c41 with lease ID 0x3fc354b1a5d073e3: Processing first storage report for DS-d63e6473-7146-45f6-bb38-2973f26f02ad from datanode DatanodeRegistration(127.0.0.1:45339, datanodeUuid=404f4ad1-b202-464b-a485-35b09c1fe890, infoPort=39483, infoSecurePort=0, ipcPort=43555, storageInfo=lv=-57;cid=testClusterID;nsid=1710603121;c=1732390489657) 2024-11-23T19:34:53,410 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6beea82269744c41 with lease ID 0x3fc354b1a5d073e3: from storage DS-d63e6473-7146-45f6-bb38-2973f26f02ad node DatanodeRegistration(127.0.0.1:45339, datanodeUuid=404f4ad1-b202-464b-a485-35b09c1fe890, infoPort=39483, infoSecurePort=0, ipcPort=43555, storageInfo=lv=-57;cid=testClusterID;nsid=1710603121;c=1732390489657), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:34:53,410 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2fa1c154e6fe1cb7 with lease ID 0x3fc354b1a5d073e4: Processing first storage report for DS-534540b1-0511-474e-b72c-d1098d7c4d6a from datanode DatanodeRegistration(127.0.0.1:39965, datanodeUuid=07684f41-35f0-44c2-aa43-e9f17a039632, infoPort=39605, infoSecurePort=0, ipcPort=35959, storageInfo=lv=-57;cid=testClusterID;nsid=1710603121;c=1732390489657) 2024-11-23T19:34:53,410 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2fa1c154e6fe1cb7 with lease ID 0x3fc354b1a5d073e4: from storage DS-534540b1-0511-474e-b72c-d1098d7c4d6a node DatanodeRegistration(127.0.0.1:39965, datanodeUuid=07684f41-35f0-44c2-aa43-e9f17a039632, infoPort=39605, infoSecurePort=0, ipcPort=35959, storageInfo=lv=-57;cid=testClusterID;nsid=1710603121;c=1732390489657), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:34:53,410 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6beea82269744c41 with lease ID 0x3fc354b1a5d073e3: Processing first storage report for DS-89f7c0f5-6370-4936-b181-25a5e8b3018e from datanode DatanodeRegistration(127.0.0.1:45339, datanodeUuid=404f4ad1-b202-464b-a485-35b09c1fe890, infoPort=39483, infoSecurePort=0, ipcPort=43555, storageInfo=lv=-57;cid=testClusterID;nsid=1710603121;c=1732390489657) 2024-11-23T19:34:53,411 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x6beea82269744c41 with lease ID 0x3fc354b1a5d073e3: from storage DS-89f7c0f5-6370-4936-b181-25a5e8b3018e node DatanodeRegistration(127.0.0.1:45339, datanodeUuid=404f4ad1-b202-464b-a485-35b09c1fe890, infoPort=39483, infoSecurePort=0, ipcPort=43555, storageInfo=lv=-57;cid=testClusterID;nsid=1710603121;c=1732390489657), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:34:53,415 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1 2024-11-23T19:34:53,500 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/cluster_4591f455-85e2-8b21-e172-08dc0300edb1/zookeeper_0, clientPort=58400, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/cluster_4591f455-85e2-8b21-e172-08dc0300edb1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/cluster_4591f455-85e2-8b21-e172-08dc0300edb1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T19:34:53,513 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58400 2024-11-23T19:34:53,528 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:34:53,531 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:34:53,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741825_1001 (size=7) 2024-11-23T19:34:53,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741825_1001 (size=7) 2024-11-23T19:34:53,821 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d with version=8 2024-11-23T19:34:53,822 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/hbase-staging 2024-11-23T19:34:53,932 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-23T19:34:54,275 INFO [Time-limited test {}] client.ConnectionUtils(128): master/387b213c044a:0 server-side Connection retries=45 2024-11-23T19:34:54,287 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:34:54,288 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T19:34:54,293 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T19:34:54,293 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:34:54,293 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T19:34:54,469 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-23T19:34:54,539 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-23T19:34:54,550 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-23T19:34:54,555 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T19:34:54,585 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 79663 (auto-detected) 2024-11-23T19:34:54,586 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-23T19:34:54,608 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38567 2024-11-23T19:34:54,634 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38567 connecting to ZooKeeper ensemble=127.0.0.1:58400 2024-11-23T19:34:54,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:385670x0, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T19:34:54,883 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38567-0x101693118c90000 connected 2024-11-23T19:34:55,055 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:34:55,060 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:34:55,072 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:34:55,078 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d, hbase.cluster.distributed=false 2024-11-23T19:34:55,117 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T19:34:55,148 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38567 2024-11-23T19:34:55,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38567 2024-11-23T19:34:55,159 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38567 2024-11-23T19:34:55,164 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38567 2024-11-23T19:34:55,167 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38567 2024-11-23T19:34:55,343 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/387b213c044a:0 server-side Connection retries=45 2024-11-23T19:34:55,345 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:34:55,346 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T19:34:55,346 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T19:34:55,346 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:34:55,346 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T19:34:55,350 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T19:34:55,356 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T19:34:55,357 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43113 2024-11-23T19:34:55,360 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43113 connecting to ZooKeeper ensemble=127.0.0.1:58400 2024-11-23T19:34:55,362 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:34:55,371 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:34:55,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:431130x0, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T19:34:55,452 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43113-0x101693118c90001 connected 2024-11-23T19:34:55,457 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:34:55,463 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T19:34:55,477 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T19:34:55,481 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T19:34:55,488 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T19:34:55,497 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43113 2024-11-23T19:34:55,497 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43113 2024-11-23T19:34:55,500 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43113 2024-11-23T19:34:55,505 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43113 2024-11-23T19:34:55,506 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43113 2024-11-23T19:34:55,524 DEBUG [M:0;387b213c044a:38567 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;387b213c044a:38567 2024-11-23T19:34:55,525 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/387b213c044a,38567,1732390494017 2024-11-23T19:34:55,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:34:55,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:34:55,559 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/387b213c044a,38567,1732390494017 2024-11-23T19:34:55,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T19:34:55,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:34:55,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-23T19:34:55,613 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T19:34:55,621 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/387b213c044a,38567,1732390494017 from backup master directory 2024-11-23T19:34:55,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/387b213c044a,38567,1732390494017 2024-11-23T19:34:55,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:34:55,639 WARN [master/387b213c044a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T19:34:55,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:34:55,639 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=387b213c044a,38567,1732390494017 2024-11-23T19:34:55,642 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-23T19:34:55,644 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-23T19:34:55,749 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/hbase.id] with ID: 6b9f7251-3bc9-4cf9-a9ec-4913397a7f00 2024-11-23T19:34:55,750 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/.tmp/hbase.id 2024-11-23T19:34:55,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741826_1002 (size=42) 2024-11-23T19:34:55,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741826_1002 (size=42) 2024-11-23T19:34:56,200 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/.tmp/hbase.id]:[hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/hbase.id] 2024-11-23T19:34:56,283 INFO [master/387b213c044a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:34:56,294 INFO 
[master/387b213c044a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-23T19:34:56,322 INFO [master/387b213c044a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 26ms. 2024-11-23T19:34:56,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:34:56,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:34:56,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741827_1003 (size=196) 2024-11-23T19:34:56,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741827_1003 (size=196) 2024-11-23T19:34:56,446 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T19:34:56,450 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-23T19:34:56,458 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:34:56,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741828_1004 (size=1189) 2024-11-23T19:34:56,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741828_1004 (size=1189) 2024-11-23T19:34:56,537 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store 2024-11-23T19:34:56,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741829_1005 (size=34) 2024-11-23T19:34:56,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741829_1005 (size=34) 2024-11-23T19:34:56,577 INFO [master/387b213c044a:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-23T19:34:56,581 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:34:56,583 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T19:34:56,583 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:34:56,583 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:34:56,585 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 1 ms 2024-11-23T19:34:56,586 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-23T19:34:56,586 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:34:56,587 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732390496583Disabling compacts and flushes for region at 1732390496583Disabling writes for close at 1732390496585 (+2 ms)Writing region close event to WAL at 1732390496586 (+1 ms)Closed at 1732390496586 2024-11-23T19:34:56,590 WARN [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/.initializing 2024-11-23T19:34:56,590 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/WALs/387b213c044a,38567,1732390494017 2024-11-23T19:34:56,629 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C38567%2C1732390494017, suffix=, logDir=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/WALs/387b213c044a,38567,1732390494017, archiveDir=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/oldWALs, maxLogs=10 2024-11-23T19:34:56,641 INFO [master/387b213c044a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C38567%2C1732390494017.1732390496635 2024-11-23T19:34:56,670 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/WALs/387b213c044a,38567,1732390494017/387b213c044a%2C38567%2C1732390494017.1732390496635 2024-11-23T19:34:56,680 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39605:39605),(127.0.0.1/127.0.0.1:39483:39483)] 2024-11-23T19:34:56,682 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T19:34:56,682 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:34:56,686 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:34:56,687 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:34:56,731 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:34:56,758 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): 
size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T19:34:56,761 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:34:56,764 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:34:56,765 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:34:56,768 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T19:34:56,768 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:34:56,769 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:34:56,769 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:34:56,772 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T19:34:56,773 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:34:56,776 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:34:56,777 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:34:56,783 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T19:34:56,783 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:34:56,784 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:34:56,785 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:34:56,789 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:34:56,790 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:34:56,796 DEBUG [master/387b213c044a:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:34:56,796 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:34:56,800 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T19:34:56,804 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:34:56,809 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T19:34:56,811 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=766737, jitterRate=-0.025044366717338562}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T19:34:56,818 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732390496700Initializing all the Stores at 1732390496704 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390496705 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390496705Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390496706 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390496706Cleaning up temporary data from old regions at 1732390496796 (+90 ms)Region opened successfully at 1732390496818 (+22 ms) 2024-11-23T19:34:56,820 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T19:34:56,864 DEBUG 
[master/387b213c044a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e24f2c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=387b213c044a/172.17.0.3:0 2024-11-23T19:34:56,906 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-23T19:34:56,922 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T19:34:56,923 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T19:34:56,928 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T19:34:56,932 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 4 msec 2024-11-23T19:34:56,941 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 7 msec 2024-11-23T19:34:56,942 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T19:34:56,988 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T19:34:57,003 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T19:34:57,051 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-23T19:34:57,056 INFO [master/387b213c044a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T19:34:57,058 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T19:34:57,071 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-23T19:34:57,074 INFO [master/387b213c044a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T19:34:57,091 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T19:34:57,103 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-23T19:34:57,105 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T19:34:57,113 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T19:34:57,146 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T19:34:57,155 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T19:34:57,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T19:34:57,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T19:34:57,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:34:57,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:34:57,173 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=387b213c044a,38567,1732390494017, sessionid=0x101693118c90000, setting cluster-up flag (Was=false) 2024-11-23T19:34:57,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:34:57,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:34:57,239 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T19:34:57,242 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=387b213c044a,38567,1732390494017 2024-11-23T19:34:57,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:34:57,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:34:57,303 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): 
Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T19:34:57,307 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=387b213c044a,38567,1732390494017 2024-11-23T19:34:57,316 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-23T19:34:57,424 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-23T19:34:57,429 INFO [RS:0;387b213c044a:43113 {}] regionserver.HRegionServer(746): ClusterId : 6b9f7251-3bc9-4cf9-a9ec-4913397a7f00 2024-11-23T19:34:57,432 DEBUG [RS:0;387b213c044a:43113 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T19:34:57,437 INFO [master/387b213c044a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-23T19:34:57,444 INFO [master/387b213c044a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
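The StochasticLoadBalancer line above reports maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800 and maxRunningTime=30000, all of which come from configuration. A minimal sketch of overriding them programmatically follows; the hbase.master.balancer.stochastic.* property keys are the usual ones and should be checked against the release in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Values mirror what the log line above reports as loaded config.
        conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
        return conf;
      }
    }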
2024-11-23T19:34:57,450 DEBUG [RS:0;387b213c044a:43113 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T19:34:57,450 DEBUG [RS:0;387b213c044a:43113 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T19:34:57,451 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 387b213c044a,38567,1732390494017 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T19:34:57,469 DEBUG [RS:0;387b213c044a:43113 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T19:34:57,470 DEBUG [RS:0;387b213c044a:43113 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78099ffe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=387b213c044a/172.17.0.3:0 2024-11-23T19:34:57,470 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:34:57,471 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:34:57,471 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:34:57,471 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:34:57,471 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/387b213c044a:0, corePoolSize=10, maxPoolSize=10 2024-11-23T19:34:57,471 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:34:57,472 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/387b213c044a:0, corePoolSize=2, maxPoolSize=2 2024-11-23T19:34:57,472 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:34:57,485 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732390527485 2024-11-23T19:34:57,485 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:34:57,485 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-23T19:34:57,487 INFO 
[master/387b213c044a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T19:34:57,489 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T19:34:57,494 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T19:34:57,494 DEBUG [RS:0;387b213c044a:43113 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;387b213c044a:43113 2024-11-23T19:34:57,494 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T19:34:57,496 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T19:34:57,497 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T19:34:57,499 INFO [RS:0;387b213c044a:43113 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T19:34:57,498 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T19:34:57,499 INFO [RS:0;387b213c044a:43113 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T19:34:57,500 DEBUG [RS:0;387b213c044a:43113 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-23T19:34:57,501 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T19:34:57,503 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T19:34:57,504 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T19:34:57,504 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:34:57,505 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T19:34:57,506 INFO [RS:0;387b213c044a:43113 {}] regionserver.HRegionServer(2659): reportForDuty to master=387b213c044a,38567,1732390494017 with port=43113, startcode=1732390495279 2024-11-23T19:34:57,512 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T19:34:57,512 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T19:34:57,516 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390497514,5,FailOnTimeoutGroup] 2024-11-23T19:34:57,521 DEBUG [RS:0;387b213c044a:43113 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T19:34:57,524 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390497517,5,FailOnTimeoutGroup] 2024-11-23T19:34:57,524 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T19:34:57,525 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T19:34:57,526 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T19:34:57,527 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
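The hbase:meta descriptor created above carries coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|'. For a user table, a coprocessor can be attached the same way through the descriptor builder; a minimal sketch is below, where the table name and family are placeholders and only the endpoint class name is taken from the log.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorAttachSketch {
      public static TableDescriptor withEndpoint() throws IOException {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Same endpoint class the master attaches to hbase:meta in the log above.
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
      }
    }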
2024-11-23T19:34:57,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741831_1007 (size=1321) 2024-11-23T19:34:57,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741831_1007 (size=1321) 2024-11-23T19:34:57,540 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-23T19:34:57,541 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d 2024-11-23T19:34:57,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741832_1008 (size=32) 2024-11-23T19:34:57,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741832_1008 (size=32) 2024-11-23T19:34:57,575 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:34:57,582 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T19:34:57,591 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T19:34:57,591 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:34:57,594 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:34:57,594 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T19:34:57,598 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T19:34:57,598 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:34:57,600 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:34:57,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T19:34:57,605 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T19:34:57,605 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:34:57,606 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:34:57,607 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T19:34:57,617 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T19:34:57,617 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:34:57,620 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51131, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T19:34:57,623 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:34:57,623 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T19:34:57,629 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38567 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 387b213c044a,43113,1732390495279 2024-11-23T19:34:57,631 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740 2024-11-23T19:34:57,633 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38567 {}] master.ServerManager(517): Registering regionserver=387b213c044a,43113,1732390495279 2024-11-23T19:34:57,637 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740 2024-11-23T19:34:57,649 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T19:34:57,649 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 
1588230740 2024-11-23T19:34:57,651 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T19:34:57,654 DEBUG [RS:0;387b213c044a:43113 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d 2024-11-23T19:34:57,654 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T19:34:57,654 DEBUG [RS:0;387b213c044a:43113 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35281 2024-11-23T19:34:57,655 DEBUG [RS:0;387b213c044a:43113 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T19:34:57,661 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T19:34:57,662 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=690850, jitterRate=-0.12154002487659454}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T19:34:57,667 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732390497575Initializing all the Stores at 1732390497578 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390497578Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390497580 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390497580Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390497580Cleaning up temporary data from old regions at 1732390497649 (+69 ms)Region opened successfully at 1732390497666 (+17 ms) 2024-11-23T19:34:57,667 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T19:34:57,667 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 
2024-11-23T19:34:57,667 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T19:34:57,667 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T19:34:57,667 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T19:34:57,669 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T19:34:57,669 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732390497667Disabling compacts and flushes for region at 1732390497667Disabling writes for close at 1732390497667Writing region close event to WAL at 1732390497669 (+2 ms)Closed at 1732390497669 2024-11-23T19:34:57,672 DEBUG [RS:0;387b213c044a:43113 {}] zookeeper.ZKUtil(111): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/387b213c044a,43113,1732390495279 2024-11-23T19:34:57,672 WARN [RS:0;387b213c044a:43113 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T19:34:57,672 INFO [RS:0;387b213c044a:43113 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:34:57,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T19:34:57,673 DEBUG [RS:0;387b213c044a:43113 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279 2024-11-23T19:34:57,674 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:34:57,674 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-23T19:34:57,681 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T19:34:57,686 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [387b213c044a,43113,1732390495279] 2024-11-23T19:34:57,692 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T19:34:57,696 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T19:34:57,723 INFO [RS:0;387b213c044a:43113 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T19:34:57,746 INFO [RS:0;387b213c044a:43113 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 
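The region server announces itself by creating an ephemeral znode under /hbase/rs, and the master's RegionServerTracker picks it up through a child watch, which is what the NodeChildrenChanged events above show. The same pattern in the plain ZooKeeper client API is sketched below, with the quorum string and znode paths copied from the log; this uses the raw ZooKeeper client rather than HBase's ZKWatcher/ZKUtil wrappers, and error handling is omitted.

    import java.util.List;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralRegistrationSketch {
      public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("Received ZooKeeper Event, type=" + event.getType()
                + ", state=" + event.getState() + ", path=" + event.getPath());
        ZooKeeper zk = new ZooKeeper("127.0.0.1:58400", 30_000, watcher);
        // Ephemeral node for this server; it disappears when the session ends.
        // The parent /hbase/rs must already exist, as it does in the cluster above.
        zk.create("/hbase/rs/387b213c044a,43113,1732390495279", new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        // A watching getChildren delivers NodeChildrenChanged events like those in the log.
        List<String> servers = zk.getChildren("/hbase/rs", true);
        System.out.println("live servers: " + servers);
      }
    }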
2024-11-23T19:34:57,755 INFO [RS:0;387b213c044a:43113 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T19:34:57,756 INFO [RS:0;387b213c044a:43113 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:34:57,757 INFO [RS:0;387b213c044a:43113 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T19:34:57,768 INFO [RS:0;387b213c044a:43113 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T19:34:57,770 INFO [RS:0;387b213c044a:43113 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T19:34:57,770 DEBUG [RS:0;387b213c044a:43113 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:34:57,771 DEBUG [RS:0;387b213c044a:43113 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:34:57,771 DEBUG [RS:0;387b213c044a:43113 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:34:57,771 DEBUG [RS:0;387b213c044a:43113 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:34:57,771 DEBUG [RS:0;387b213c044a:43113 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:34:57,771 DEBUG [RS:0;387b213c044a:43113 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/387b213c044a:0, corePoolSize=2, maxPoolSize=2 2024-11-23T19:34:57,771 DEBUG [RS:0;387b213c044a:43113 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:34:57,772 DEBUG [RS:0;387b213c044a:43113 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:34:57,772 DEBUG [RS:0;387b213c044a:43113 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:34:57,772 DEBUG [RS:0;387b213c044a:43113 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:34:57,772 DEBUG [RS:0;387b213c044a:43113 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:34:57,772 DEBUG [RS:0;387b213c044a:43113 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:34:57,773 DEBUG [RS:0;387b213c044a:43113 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/387b213c044a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T19:34:57,773 DEBUG [RS:0;387b213c044a:43113 
{}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T19:34:57,777 INFO [RS:0;387b213c044a:43113 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T19:34:57,777 INFO [RS:0;387b213c044a:43113 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T19:34:57,778 INFO [RS:0;387b213c044a:43113 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:34:57,778 INFO [RS:0;387b213c044a:43113 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T19:34:57,778 INFO [RS:0;387b213c044a:43113 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T19:34:57,778 INFO [RS:0;387b213c044a:43113 {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,43113,1732390495279-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T19:34:57,799 INFO [RS:0;387b213c044a:43113 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T19:34:57,801 INFO [RS:0;387b213c044a:43113 {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,43113,1732390495279-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:34:57,801 INFO [RS:0;387b213c044a:43113 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:34:57,802 INFO [RS:0;387b213c044a:43113 {}] regionserver.Replication(171): 387b213c044a,43113,1732390495279 started 2024-11-23T19:34:57,822 INFO [RS:0;387b213c044a:43113 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
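Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" line above registers a periodic task with the server's ChoreService. HBase implements this with its own ChoreService and ScheduledChore classes; the sketch below shows only the underlying fixed-rate pattern with the standard java.util.concurrent API and a made-up task body, not the HBase classes themselves.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChorePatternSketch {
      public static void main(String[] args) {
        ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();
        // Analogous to CompactionChecker above: period=1000, unit=MILLISECONDS.
        chorePool.scheduleAtFixedRate(
            () -> System.out.println("compaction check tick"),
            0, 1000, TimeUnit.MILLISECONDS);
        // The pool keeps running until shutdown() is called, like a chore service.
      }
    }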
2024-11-23T19:34:57,823 INFO [RS:0;387b213c044a:43113 {}] regionserver.HRegionServer(1482): Serving as 387b213c044a,43113,1732390495279, RpcServer on 387b213c044a/172.17.0.3:43113, sessionid=0x101693118c90001 2024-11-23T19:34:57,824 DEBUG [RS:0;387b213c044a:43113 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T19:34:57,824 DEBUG [RS:0;387b213c044a:43113 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 387b213c044a,43113,1732390495279 2024-11-23T19:34:57,824 DEBUG [RS:0;387b213c044a:43113 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '387b213c044a,43113,1732390495279' 2024-11-23T19:34:57,825 DEBUG [RS:0;387b213c044a:43113 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T19:34:57,828 DEBUG [RS:0;387b213c044a:43113 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T19:34:57,829 DEBUG [RS:0;387b213c044a:43113 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T19:34:57,829 DEBUG [RS:0;387b213c044a:43113 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T19:34:57,830 DEBUG [RS:0;387b213c044a:43113 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 387b213c044a,43113,1732390495279 2024-11-23T19:34:57,830 DEBUG [RS:0;387b213c044a:43113 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '387b213c044a,43113,1732390495279' 2024-11-23T19:34:57,830 DEBUG [RS:0;387b213c044a:43113 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T19:34:57,831 DEBUG [RS:0;387b213c044a:43113 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T19:34:57,832 DEBUG [RS:0;387b213c044a:43113 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T19:34:57,832 INFO [RS:0;387b213c044a:43113 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T19:34:57,832 INFO [RS:0;387b213c044a:43113 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T19:34:57,847 WARN [387b213c044a:38567 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-23T19:34:57,949 INFO [RS:0;387b213c044a:43113 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C43113%2C1732390495279, suffix=, logDir=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279, archiveDir=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/oldWALs, maxLogs=32 2024-11-23T19:34:57,953 INFO [RS:0;387b213c044a:43113 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C43113%2C1732390495279.1732390497953 2024-11-23T19:34:57,981 INFO [RS:0;387b213c044a:43113 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390497953 2024-11-23T19:34:57,991 DEBUG [RS:0;387b213c044a:43113 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39605:39605),(127.0.0.1/127.0.0.1:39483:39483)] 2024-11-23T19:34:58,100 DEBUG [387b213c044a:38567 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-23T19:34:58,115 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=387b213c044a,43113,1732390495279 2024-11-23T19:34:58,123 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 387b213c044a,43113,1732390495279, state=OPENING 2024-11-23T19:34:58,187 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T19:34:58,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:34:58,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:34:58,199 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:34:58,199 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:34:58,200 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T19:34:58,203 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=387b213c044a,43113,1732390495279}] 2024-11-23T19:34:58,384 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T19:34:58,388 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34935, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T19:34:58,406 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T19:34:58,407 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:34:58,412 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C43113%2C1732390495279.meta, suffix=.meta, logDir=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279, archiveDir=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/oldWALs, maxLogs=32 2024-11-23T19:34:58,415 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C43113%2C1732390495279.meta.1732390498415.meta 2024-11-23T19:34:58,430 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.meta.1732390498415.meta 2024-11-23T19:34:58,432 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39605:39605),(127.0.0.1/127.0.0.1:39483:39483)] 2024-11-23T19:34:58,441 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T19:34:58,443 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T19:34:58,445 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T19:34:58,450 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
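[editor's note] The two "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" lines above show the effective WAL settings for the data WAL and the meta WAL. The minimal sketch below illustrates how such values are commonly configured; the property names are taken from general HBase documentation and are an assumption here, not extracted from this test's configuration, so verify them against your HBase version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static Configuration walConf() {
        Configuration conf = HBaseConfiguration.create();
        // WAL block size; the roll size is typically blocksize * roll multiplier (0.5 -> 128 MB here).
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Upper bound on retained WAL files before flushes are forced (maxLogs=32 above).
        conf.setInt("hbase.regionserver.maxlogs", 32);
        return conf;
      }
    }
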
2024-11-23T19:34:58,455 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T19:34:58,456 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:34:58,456 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T19:34:58,457 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T19:34:58,460 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T19:34:58,462 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T19:34:58,462 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:34:58,463 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:34:58,464 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T19:34:58,466 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T19:34:58,466 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:34:58,467 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:34:58,468 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T19:34:58,470 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T19:34:58,470 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:34:58,472 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:34:58,472 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T19:34:58,474 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T19:34:58,474 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:34:58,475 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-23T19:34:58,476 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T19:34:58,478 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740 2024-11-23T19:34:58,482 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740 2024-11-23T19:34:58,486 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T19:34:58,486 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T19:34:58,487 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T19:34:58,492 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T19:34:58,494 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=721733, jitterRate=-0.08226911723613739}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T19:34:58,494 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T19:34:58,497 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732390498457Writing region info on filesystem at 1732390498457Initializing all the Stores at 1732390498459 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390498460 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390498460Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390498460Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390498460Cleaning up temporary data from old regions at 1732390498486 (+26 ms)Running coprocessor post-open hooks at 1732390498495 (+9 ms)Region opened successfully at 1732390498496 (+1 ms) 2024-11-23T19:34:58,506 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732390498373 2024-11-23T19:34:58,527 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T19:34:58,528 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T19:34:58,530 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=387b213c044a,43113,1732390495279 2024-11-23T19:34:58,534 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 387b213c044a,43113,1732390495279, state=OPEN 2024-11-23T19:34:58,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T19:34:58,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T19:34:58,601 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:34:58,601 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:34:58,602 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=387b213c044a,43113,1732390495279 2024-11-23T19:34:58,609 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T19:34:58,610 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=387b213c044a,43113,1732390495279 in 399 msec 2024-11-23T19:34:58,619 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T19:34:58,619 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 931 msec 2024-11-23T19:34:58,622 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:34:58,622 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-23T19:34:58,647 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T19:34:58,648 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=387b213c044a,43113,1732390495279, seqNum=-1] 2024-11-23T19:34:58,675 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T19:34:58,678 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40817, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T19:34:58,704 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.3440 sec 2024-11-23T19:34:58,705 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732390498704, completionTime=-1 2024-11-23T19:34:58,708 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-23T19:34:58,709 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-23T19:34:58,746 INFO [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-23T19:34:58,746 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732390558746 2024-11-23T19:34:58,746 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732390618746 2024-11-23T19:34:58,746 INFO [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 37 msec 2024-11-23T19:34:58,749 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,38567,1732390494017-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:34:58,750 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,38567,1732390494017-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:34:58,750 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,38567,1732390494017-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:34:58,752 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-387b213c044a:38567, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T19:34:58,752 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T19:34:58,753 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-23T19:34:58,759 DEBUG [master/387b213c044a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T19:34:58,802 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.161sec 2024-11-23T19:34:58,803 INFO [master/387b213c044a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T19:34:58,806 INFO [master/387b213c044a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T19:34:58,807 INFO [master/387b213c044a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T19:34:58,808 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-23T19:34:58,808 INFO [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T19:34:58,809 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,38567,1732390494017-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T19:34:58,810 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,38567,1732390494017-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T19:34:58,838 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-23T19:34:58,839 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T19:34:58,839 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,38567,1732390494017-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T19:34:58,847 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66cb686a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:34:58,850 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-23T19:34:58,850 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-23T19:34:58,854 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 387b213c044a,38567,-1 for getting cluster id 2024-11-23T19:34:58,858 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T19:34:58,885 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6b9f7251-3bc9-4cf9-a9ec-4913397a7f00' 2024-11-23T19:34:58,890 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T19:34:58,890 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6b9f7251-3bc9-4cf9-a9ec-4913397a7f00" 2024-11-23T19:34:58,891 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@307d14ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:34:58,891 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [387b213c044a,38567,-1] 2024-11-23T19:34:58,895 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T19:34:58,899 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:34:58,909 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35096, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T19:34:58,918 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32fb6022, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:34:58,918 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T19:34:58,929 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=387b213c044a,43113,1732390495279, seqNum=-1] 2024-11-23T19:34:58,930 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T19:34:58,934 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40398, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T19:34:58,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=387b213c044a,38567,1732390494017 2024-11-23T19:34:58,962 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:34:58,971 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-23T19:34:58,976 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T19:34:58,984 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 387b213c044a,38567,1732390494017 2024-11-23T19:34:58,986 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7b956548 2024-11-23T19:34:58,988 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T19:34:58,994 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35104, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T19:34:58,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-23T19:34:58,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
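[editor's note] The two TableDescriptorChecker warnings above fire because the table is created with a deliberately tiny region max file size (786432 bytes) and memstore flush size (8192 bytes), which makes splits and flushes happen quickly during the test. A hedged sketch of creating an equivalent table through the public client API follows; it is an illustration under those assumptions, not the test's actual code:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateSmallTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
              .setMaxFileSize(786432L)       // small region max file size -> MAX_FILESIZE warning above
              .setMemStoreFlushSize(8192L)   // small flush size -> MEMSTORE_FLUSHSIZE warning above
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"));
          // The create request shows up in the log as CreateTableProcedure (pid=4).
          admin.createTable(table.build());
        }
      }
    }
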
2024-11-23T19:34:59,003 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T19:34:59,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-23T19:34:59,026 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T19:34:59,030 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:34:59,032 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-23T19:34:59,035 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T19:34:59,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T19:34:59,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741835_1011 (size=389) 2024-11-23T19:34:59,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741835_1011 (size=389) 2024-11-23T19:34:59,213 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6aecd90eb3e41f8e9afc86fcf7aa6992, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d 2024-11-23T19:34:59,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741836_1012 (size=72) 2024-11-23T19:34:59,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741836_1012 (size=72) 2024-11-23T19:34:59,273 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:34:59,273 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 6aecd90eb3e41f8e9afc86fcf7aa6992, disabling compactions & flushes 2024-11-23T19:34:59,273 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992. 2024-11-23T19:34:59,274 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992. 2024-11-23T19:34:59,274 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992. after waiting 0 ms 2024-11-23T19:34:59,274 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992. 2024-11-23T19:34:59,274 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992. 2024-11-23T19:34:59,274 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6aecd90eb3e41f8e9afc86fcf7aa6992: Waiting for close lock at 1732390499273Disabling compacts and flushes for region at 1732390499273Disabling writes for close at 1732390499274 (+1 ms)Writing region close event to WAL at 1732390499274Closed at 1732390499274 2024-11-23T19:34:59,281 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T19:34:59,287 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732390499282"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732390499282"}]},"ts":"1732390499282"} 2024-11-23T19:34:59,312 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-23T19:34:59,317 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T19:34:59,319 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732390499317"}]},"ts":"1732390499317"} 2024-11-23T19:34:59,330 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-23T19:34:59,333 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6aecd90eb3e41f8e9afc86fcf7aa6992, ASSIGN}] 2024-11-23T19:34:59,337 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6aecd90eb3e41f8e9afc86fcf7aa6992, ASSIGN 2024-11-23T19:34:59,342 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6aecd90eb3e41f8e9afc86fcf7aa6992, ASSIGN; state=OFFLINE, location=387b213c044a,43113,1732390495279; forceNewPlan=false, retain=false 2024-11-23T19:34:59,495 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6aecd90eb3e41f8e9afc86fcf7aa6992, regionState=OPENING, regionLocation=387b213c044a,43113,1732390495279 2024-11-23T19:34:59,503 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6aecd90eb3e41f8e9afc86fcf7aa6992, ASSIGN because future has completed 2024-11-23T19:34:59,504 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6aecd90eb3e41f8e9afc86fcf7aa6992, server=387b213c044a,43113,1732390495279}] 2024-11-23T19:34:59,668 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992. 
2024-11-23T19:34:59,669 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 6aecd90eb3e41f8e9afc86fcf7aa6992, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992.', STARTKEY => '', ENDKEY => ''} 2024-11-23T19:34:59,669 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 6aecd90eb3e41f8e9afc86fcf7aa6992 2024-11-23T19:34:59,669 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:34:59,670 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 6aecd90eb3e41f8e9afc86fcf7aa6992 2024-11-23T19:34:59,670 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 6aecd90eb3e41f8e9afc86fcf7aa6992 2024-11-23T19:34:59,673 INFO [StoreOpener-6aecd90eb3e41f8e9afc86fcf7aa6992-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6aecd90eb3e41f8e9afc86fcf7aa6992 2024-11-23T19:34:59,676 INFO [StoreOpener-6aecd90eb3e41f8e9afc86fcf7aa6992-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6aecd90eb3e41f8e9afc86fcf7aa6992 columnFamilyName info 2024-11-23T19:34:59,676 DEBUG [StoreOpener-6aecd90eb3e41f8e9afc86fcf7aa6992-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:34:59,678 INFO [StoreOpener-6aecd90eb3e41f8e9afc86fcf7aa6992-1 {}] regionserver.HStore(327): Store=6aecd90eb3e41f8e9afc86fcf7aa6992/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:34:59,679 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 6aecd90eb3e41f8e9afc86fcf7aa6992 2024-11-23T19:34:59,684 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992 2024-11-23T19:34:59,688 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992 2024-11-23T19:34:59,692 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 6aecd90eb3e41f8e9afc86fcf7aa6992 2024-11-23T19:34:59,692 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 6aecd90eb3e41f8e9afc86fcf7aa6992 2024-11-23T19:34:59,712 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 6aecd90eb3e41f8e9afc86fcf7aa6992 2024-11-23T19:34:59,724 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T19:34:59,731 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 6aecd90eb3e41f8e9afc86fcf7aa6992; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=865341, jitterRate=0.10033857822418213}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T19:34:59,731 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6aecd90eb3e41f8e9afc86fcf7aa6992 2024-11-23T19:34:59,733 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 6aecd90eb3e41f8e9afc86fcf7aa6992: Running coprocessor pre-open hook at 1732390499670Writing region info on filesystem at 1732390499670Initializing all the Stores at 1732390499672 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390499672Cleaning up temporary data from old regions at 1732390499692 (+20 ms)Running coprocessor post-open hooks at 1732390499732 (+40 ms)Region opened successfully at 1732390499733 (+1 ms) 2024-11-23T19:34:59,745 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992., pid=6, masterSystemTime=1732390499660 2024-11-23T19:34:59,752 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992. 2024-11-23T19:34:59,752 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992. 2024-11-23T19:34:59,754 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6aecd90eb3e41f8e9afc86fcf7aa6992, regionState=OPEN, openSeqNum=2, regionLocation=387b213c044a,43113,1732390495279 2024-11-23T19:34:59,762 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6aecd90eb3e41f8e9afc86fcf7aa6992, server=387b213c044a,43113,1732390495279 because future has completed 2024-11-23T19:34:59,774 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-23T19:34:59,776 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 6aecd90eb3e41f8e9afc86fcf7aa6992, server=387b213c044a,43113,1732390495279 in 262 msec 2024-11-23T19:34:59,781 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-23T19:34:59,782 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6aecd90eb3e41f8e9afc86fcf7aa6992, ASSIGN in 442 msec 2024-11-23T19:34:59,784 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T19:34:59,785 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732390499784"}]},"ts":"1732390499784"} 2024-11-23T19:34:59,789 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-23T19:34:59,792 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T19:34:59,798 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 786 msec 2024-11-23T19:35:03,968 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-23T19:35:04,035 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-23T19:35:04,036 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-23T19:35:04,535 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T19:35:04,536 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-23T19:35:04,538 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-23T19:35:04,539 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-23T19:35:04,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T19:35:04,541 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-23T19:35:04,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-23T19:35:04,542 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-23T19:35:09,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T19:35:09,140 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-23T19:35:09,144 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-23T19:35:09,153 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-23T19:35:09,153 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992. 
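[editor's note] From this point the test rolls the WAL and writes rows into the 'info' family of the test table (the log below shows cells keyed row0001/info: of about 1080 bytes and a ~7.36 KB flush of 7 entries at sequenceid=11), so that the small memstore flush size and the WAL settings force flushes and log rolls. A rough client-side sketch of such a write loop is below; the qualifier, value size, and row count are assumptions chosen only to line up with the figures in the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteLoadSketch {
      public static void main(String[] args) throws Exception {
        byte[] family = Bytes.toBytes("info");
        byte[] qualifier = Bytes.toBytes("q"); // assumed; the logged cells use an empty qualifier
        byte[] value = new byte[1024];         // ~1 KB values, in line with the 1080-byte cells logged below
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
          for (int i = 1; i <= 7; i++) {       // 7 puts, matching the "entries=7" flushed further down
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
            put.addColumn(family, qualifier, value);
            table.put(put);                    // each put is appended to the WAL before the memstore
          }
        }
      }
    }
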
2024-11-23T19:35:09,154 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C43113%2C1732390495279.1732390509154 2024-11-23T19:35:09,170 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:09,170 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:09,170 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:09,170 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:09,171 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:09,171 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390497953 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390509154 2024-11-23T19:35:09,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741833_1009 (size=451) 2024-11-23T19:35:09,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741833_1009 (size=451) 2024-11-23T19:35:09,185 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390497953 to hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/oldWALs/387b213c044a%2C43113%2C1732390495279.1732390497953 2024-11-23T19:35:09,189 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39605:39605),(127.0.0.1/127.0.0.1:39483:39483)] 2024-11-23T19:35:09,199 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992., hostname=387b213c044a,43113,1732390495279, seqNum=2] 2024-11-23T19:35:21,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43113 {}] regionserver.HRegion(8855): Flush requested on 6aecd90eb3e41f8e9afc86fcf7aa6992 2024-11-23T19:35:21,242 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6aecd90eb3e41f8e9afc86fcf7aa6992 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T19:35:21,385 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/.tmp/info/2eba31006fbe407288137d65f7214ef5 is 1080, key is row0001/info:/1732390509203/Put/seqid=0 2024-11-23T19:35:21,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741838_1014 (size=12509) 2024-11-23T19:35:21,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741838_1014 (size=12509) 2024-11-23T19:35:21,480 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/.tmp/info/2eba31006fbe407288137d65f7214ef5 2024-11-23T19:35:21,616 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/.tmp/info/2eba31006fbe407288137d65f7214ef5 as hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/2eba31006fbe407288137d65f7214ef5 2024-11-23T19:35:21,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/2eba31006fbe407288137d65f7214ef5, entries=7, sequenceid=11, filesize=12.2 K 2024-11-23T19:35:21,712 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6aecd90eb3e41f8e9afc86fcf7aa6992 in 464ms, sequenceid=11, compaction requested=false 2024-11-23T19:35:21,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6aecd90eb3e41f8e9afc86fcf7aa6992: 2024-11-23T19:35:23,412 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T19:35:29,252 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C43113%2C1732390495279.1732390529252 2024-11-23T19:35:29,496 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 241 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK]] 2024-11-23T19:35:29,497 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:29,497 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:29,497 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:29,497 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:29,500 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:29,504 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390509154 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390529252 2024-11-23T19:35:29,515 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39483:39483),(127.0.0.1/127.0.0.1:39605:39605)] 2024-11-23T19:35:29,515 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390509154 is not closed yet, will try archiving it next time 2024-11-23T19:35:29,515 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741837_1013 (size=12399) 2024-11-23T19:35:29,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741837_1013 (size=12399) 2024-11-23T19:35:29,726 INFO [FSHLog-0-hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d-prefix:387b213c044a,43113,1732390495279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK], DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK]] 2024-11-23T19:35:31,931 INFO [FSHLog-0-hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d-prefix:387b213c044a,43113,1732390495279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK], DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK]] 2024-11-23T19:35:34,136 INFO [FSHLog-0-hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d-prefix:387b213c044a,43113,1732390495279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK], DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK]] 2024-11-23T19:35:36,341 INFO [FSHLog-0-hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d-prefix:387b213c044a,43113,1732390495279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK], DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK]] 2024-11-23T19:35:36,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43113 {}] regionserver.HRegion(8855): Flush requested on 6aecd90eb3e41f8e9afc86fcf7aa6992 2024-11-23T19:35:36,341 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6aecd90eb3e41f8e9afc86fcf7aa6992 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T19:35:36,544 INFO [FSHLog-0-hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d-prefix:387b213c044a,43113,1732390495279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK], DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK]] 2024-11-23T19:35:36,551 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/.tmp/info/dea1c2c52e6b4d39b92c4c15796eeec2 is 1080, key is row0008/info:/1732390523239/Put/seqid=0 2024-11-23T19:35:36,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741840_1016 (size=12509) 2024-11-23T19:35:36,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741840_1016 (size=12509) 2024-11-23T19:35:36,592 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at 
sequenceid=21 (bloomFilter=true), to=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/.tmp/info/dea1c2c52e6b4d39b92c4c15796eeec2 2024-11-23T19:35:36,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/.tmp/info/dea1c2c52e6b4d39b92c4c15796eeec2 as hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/dea1c2c52e6b4d39b92c4c15796eeec2 2024-11-23T19:35:36,675 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/dea1c2c52e6b4d39b92c4c15796eeec2, entries=7, sequenceid=21, filesize=12.2 K 2024-11-23T19:35:36,878 INFO [FSHLog-0-hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d-prefix:387b213c044a,43113,1732390495279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK], DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK]] 2024-11-23T19:35:36,878 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6aecd90eb3e41f8e9afc86fcf7aa6992 in 537ms, sequenceid=21, compaction requested=false 2024-11-23T19:35:36,878 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6aecd90eb3e41f8e9afc86fcf7aa6992: 2024-11-23T19:35:36,878 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-23T19:35:36,878 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:35:36,879 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/2eba31006fbe407288137d65f7214ef5 because midkey is the same as first or last row 2024-11-23T19:35:38,546 INFO [FSHLog-0-hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d-prefix:387b213c044a,43113,1732390495279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK], DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK]] 2024-11-23T19:35:39,367 INFO [master/387b213c044a:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-23T19:35:39,367 INFO [master/387b213c044a:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-23T19:35:41,766 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_349425420_22 at /127.0.0.1:57654 [Receiving block BP-601919595-172.17.0.3-1732390489657:blk_1073741839_1015] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 1015ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/cluster_4591f455-85e2-8b21-e172-08dc0300edb1/data/data3/, blockId=1073741839, seqno=8 2024-11-23T19:35:41,766 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_349425420_22 at /127.0.0.1:58596 [Receiving block BP-601919595-172.17.0.3-1732390489657:blk_1073741839_1015] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 1015ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/cluster_4591f455-85e2-8b21-e172-08dc0300edb1/data/data1/, blockId=1073741839, seqno=8 2024-11-23T19:35:41,768 INFO [FSHLog-0-hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d-prefix:387b213c044a,43113,1732390495279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 1219 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK], DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK]] 2024-11-23T19:35:41,773 WARN [FSHLog-0-hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d-prefix:387b213c044a,43113,1732390495279 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK], DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK]] 2024-11-23T19:35:41,774 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 387b213c044a%2C43113%2C1732390495279:(num 1732390529252) roll requested 2024-11-23T19:35:41,775 INFO [regionserver/387b213c044a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C43113%2C1732390495279.1732390541774 2024-11-23T19:35:41,988 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 210 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK], DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK]] 2024-11-23T19:35:41,988 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:41,988 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:41,988 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:41,988 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:41,988 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:41,989 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390529252 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390541774 2024-11-23T19:35:41,990 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new 
FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39605:39605),(127.0.0.1/127.0.0.1:39483:39483)] 2024-11-23T19:35:41,990 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390529252 is not closed yet, will try archiving it next time 2024-11-23T19:35:41,990 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390509154 to hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/oldWALs/387b213c044a%2C43113%2C1732390495279.1732390509154 2024-11-23T19:35:41,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741839_1015 (size=7739) 2024-11-23T19:35:41,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741839_1015 (size=7739) 2024-11-23T19:35:43,974 INFO [FSHLog-0-hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d-prefix:387b213c044a,43113,1732390495279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK]] 2024-11-23T19:35:44,670 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6aecd90eb3e41f8e9afc86fcf7aa6992, had cached 0 bytes from a total of 25018 2024-11-23T19:35:46,179 INFO [FSHLog-0-hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d-prefix:387b213c044a,43113,1732390495279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK]] 2024-11-23T19:35:48,384 INFO [FSHLog-0-hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d-prefix:387b213c044a,43113,1732390495279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK]] 2024-11-23T19:35:50,589 INFO [FSHLog-0-hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d-prefix:387b213c044a,43113,1732390495279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK]] 2024-11-23T19:35:52,592 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T19:35:52,593 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C43113%2C1732390495279.1732390552592 2024-11-23T19:35:53,413 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T19:35:57,602 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK]] 2024-11-23T19:35:57,604 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK]] 2024-11-23T19:35:57,604 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 387b213c044a%2C43113%2C1732390495279:(num 1732390552592) roll requested 2024-11-23T19:35:57,604 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:57,604 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:57,605 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:57,605 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:57,605 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:35:57,605 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390541774 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390552592 2024-11-23T19:35:57,606 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39605:39605),(127.0.0.1/127.0.0.1:39483:39483)] 2024-11-23T19:35:57,606 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390541774 is not closed yet, will try archiving it next time 2024-11-23T19:35:57,607 INFO [regionserver/387b213c044a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C43113%2C1732390495279.1732390557607 2024-11-23T19:35:57,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741841_1017 (size=4753) 2024-11-23T19:35:57,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741841_1017 (size=4753) 2024-11-23T19:36:02,611 INFO [FSHLog-0-hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d-prefix:387b213c044a,43113,1732390495279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK]] 2024-11-23T19:36:02,611 WARN [FSHLog-0-hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d-prefix:387b213c044a,43113,1732390495279 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK], 
DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK]] 2024-11-23T19:36:02,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43113 {}] regionserver.HRegion(8855): Flush requested on 6aecd90eb3e41f8e9afc86fcf7aa6992 2024-11-23T19:36:02,612 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6aecd90eb3e41f8e9afc86fcf7aa6992 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T19:36:02,654 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5042 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK]] 2024-11-23T19:36:02,654 WARN [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5042 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK]] 2024-11-23T19:36:04,612 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T19:36:07,616 INFO [FSHLog-0-hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d-prefix:387b213c044a,43113,1732390495279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK]] 2024-11-23T19:36:07,616 WARN [FSHLog-0-hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d-prefix:387b213c044a,43113,1732390495279 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK]] 2024-11-23T19:36:07,616 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:07,617 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:07,617 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:07,617 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:07,617 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:07,618 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390552592 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390557607 2024-11-23T19:36:07,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741842_1018 (size=1569) 2024-11-23T19:36:07,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741842_1018 (size=1569) 2024-11-23T19:36:07,628 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/.tmp/info/5098162871014a6d97d3485213da62f3 is 1080, key is row0015/info:/1732390538344/Put/seqid=0 2024-11-23T19:36:07,633 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39605:39605),(127.0.0.1/127.0.0.1:39483:39483)] 2024-11-23T19:36:07,634 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390552592 is not closed yet, will try archiving it next time 2024-11-23T19:36:07,634 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 387b213c044a%2C43113%2C1732390495279:(num 1732390557607) roll requested 2024-11-23T19:36:07,634 INFO [regionserver/387b213c044a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C43113%2C1732390495279.1732390567634 2024-11-23T19:36:07,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741844_1020 (size=12509) 2024-11-23T19:36:07,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741844_1020 (size=12509) 2024-11-23T19:36:07,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/.tmp/info/5098162871014a6d97d3485213da62f3 2024-11-23T19:36:07,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/.tmp/info/5098162871014a6d97d3485213da62f3 as hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/5098162871014a6d97d3485213da62f3 2024-11-23T19:36:07,733 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/5098162871014a6d97d3485213da62f3, entries=7, sequenceid=31, filesize=12.2 K 2024-11-23T19:36:12,650 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5009 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK]] 2024-11-23T19:36:12,650 WARN [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5009 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK]] 2024-11-23T19:36:12,737 INFO 
[FSHLog-0-hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d-prefix:387b213c044a,43113,1732390495279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK]] 2024-11-23T19:36:12,738 WARN [FSHLog-0-hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d-prefix:387b213c044a,43113,1732390495279 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39965,DS-0f8f2722-6705-4966-abdb-1a73ca76a3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45339,DS-d63e6473-7146-45f6-bb38-2973f26f02ad,DISK]] 2024-11-23T19:36:12,738 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6aecd90eb3e41f8e9afc86fcf7aa6992 in 10125ms, sequenceid=31, compaction requested=true 2024-11-23T19:36:12,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6aecd90eb3e41f8e9afc86fcf7aa6992: 2024-11-23T19:36:12,738 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:12,738 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:12,738 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-23T19:36:12,738 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:36:12,738 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:12,738 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/2eba31006fbe407288137d65f7214ef5 because midkey is the same as first or last row 2024-11-23T19:36:12,738 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:12,739 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:12,739 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390557607 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390567634 2024-11-23T19:36:12,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aecd90eb3e41f8e9afc86fcf7aa6992:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T19:36:12,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:36:12,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741843_1019 (size=438) 2024-11-23T19:36:12,744 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 
eligible, 16 blocking 2024-11-23T19:36:12,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741843_1019 (size=438) 2024-11-23T19:36:12,746 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390529252 to hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/oldWALs/387b213c044a%2C43113%2C1732390495279.1732390529252 2024-11-23T19:36:12,746 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39605:39605),(127.0.0.1/127.0.0.1:39483:39483)] 2024-11-23T19:36:12,746 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 387b213c044a%2C43113%2C1732390495279:(num 1732390567634) roll requested 2024-11-23T19:36:12,747 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C43113%2C1732390495279.1732390572746 2024-11-23T19:36:12,749 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390541774 to hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/oldWALs/387b213c044a%2C43113%2C1732390495279.1732390541774 2024-11-23T19:36:12,750 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T19:36:12,751 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390552592 to hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/oldWALs/387b213c044a%2C43113%2C1732390495279.1732390552592 2024-11-23T19:36:12,752 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] regionserver.HStore(1541): 6aecd90eb3e41f8e9afc86fcf7aa6992/info is initiating minor compaction (all files) 2024-11-23T19:36:12,753 INFO [RS:0;387b213c044a:43113-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6aecd90eb3e41f8e9afc86fcf7aa6992/info in TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992. 
2024-11-23T19:36:12,754 INFO [RS:0;387b213c044a:43113-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/2eba31006fbe407288137d65f7214ef5, hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/dea1c2c52e6b4d39b92c4c15796eeec2, hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/5098162871014a6d97d3485213da62f3] into tmpdir=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/.tmp, totalSize=36.6 K 2024-11-23T19:36:12,756 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390557607 to hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/oldWALs/387b213c044a%2C43113%2C1732390495279.1732390557607 2024-11-23T19:36:12,761 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2eba31006fbe407288137d65f7214ef5, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732390509203 2024-11-23T19:36:12,762 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] compactions.Compactor(225): Compacting dea1c2c52e6b4d39b92c4c15796eeec2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732390523239 2024-11-23T19:36:12,765 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5098162871014a6d97d3485213da62f3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732390538344 2024-11-23T19:36:12,772 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:12,774 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:12,775 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:12,776 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:12,777 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:12,777 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390567634 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390572746 2024-11-23T19:36:12,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741845_1021 (size=93) 2024-11-23T19:36:12,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741845_1021 (size=93) 2024-11-23T19:36:12,782 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390567634 to hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/oldWALs/387b213c044a%2C43113%2C1732390495279.1732390567634 2024-11-23T19:36:12,800 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39483:39483),(127.0.0.1/127.0.0.1:39605:39605)] 2024-11-23T19:36:12,803 INFO [regionserver/387b213c044a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C43113%2C1732390495279.1732390572802 2024-11-23T19:36:12,857 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:12,859 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:12,860 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:12,861 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:12,861 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:12,862 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390572746 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390572802 2024-11-23T19:36:12,870 INFO [RS:0;387b213c044a:43113-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aecd90eb3e41f8e9afc86fcf7aa6992#info#compaction#3 average throughput is 5.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T19:36:12,872 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/.tmp/info/f8e64fb2bf384cc8a85fb3aaa922caa1 is 1080, key is row0001/info:/1732390509203/Put/seqid=0 2024-11-23T19:36:12,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741846_1022 (size=1258) 2024-11-23T19:36:12,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741846_1022 (size=1258) 2024-11-23T19:36:12,883 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39605:39605),(127.0.0.1/127.0.0.1:39483:39483)] 2024-11-23T19:36:12,884 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/WALs/387b213c044a,43113,1732390495279/387b213c044a%2C43113%2C1732390495279.1732390572746 is not closed yet, will try archiving it next time 2024-11-23T19:36:12,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741848_1024 (size=27710) 2024-11-23T19:36:12,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741848_1024 (size=27710) 2024-11-23T19:36:13,000 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/.tmp/info/f8e64fb2bf384cc8a85fb3aaa922caa1 as hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/f8e64fb2bf384cc8a85fb3aaa922caa1 2024-11-23T19:36:13,075 INFO [RS:0;387b213c044a:43113-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6aecd90eb3e41f8e9afc86fcf7aa6992/info of 6aecd90eb3e41f8e9afc86fcf7aa6992 into f8e64fb2bf384cc8a85fb3aaa922caa1(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T19:36:13,075 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6aecd90eb3e41f8e9afc86fcf7aa6992: 2024-11-23T19:36:13,077 INFO [RS:0;387b213c044a:43113-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992., storeName=6aecd90eb3e41f8e9afc86fcf7aa6992/info, priority=13, startTime=1732390572740; duration=0sec 2024-11-23T19:36:13,078 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-23T19:36:13,078 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:36:13,078 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/f8e64fb2bf384cc8a85fb3aaa922caa1 because midkey is the same as first or last row 2024-11-23T19:36:13,079 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-23T19:36:13,079 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:36:13,079 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/f8e64fb2bf384cc8a85fb3aaa922caa1 because midkey is the same as first or last row 2024-11-23T19:36:13,079 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-23T19:36:13,079 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:36:13,079 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/f8e64fb2bf384cc8a85fb3aaa922caa1 because midkey is the same as first or last row 2024-11-23T19:36:13,079 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:36:13,079 DEBUG [RS:0;387b213c044a:43113-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aecd90eb3e41f8e9afc86fcf7aa6992:info 2024-11-23T19:36:23,413 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T19:36:24,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43113 {}] regionserver.HRegion(8855): Flush requested on 6aecd90eb3e41f8e9afc86fcf7aa6992 2024-11-23T19:36:24,843 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6aecd90eb3e41f8e9afc86fcf7aa6992 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T19:36:24,862 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/.tmp/info/f165175b642b4af3ad4646c7457abe82 is 1080, key is row0022/info:/1732390572803/Put/seqid=0 2024-11-23T19:36:24,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741849_1025 (size=12509) 2024-11-23T19:36:24,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741849_1025 (size=12509) 2024-11-23T19:36:24,940 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/.tmp/info/f165175b642b4af3ad4646c7457abe82 2024-11-23T19:36:24,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/.tmp/info/f165175b642b4af3ad4646c7457abe82 as hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/f165175b642b4af3ad4646c7457abe82 2024-11-23T19:36:25,022 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/f165175b642b4af3ad4646c7457abe82, entries=7, sequenceid=42, filesize=12.2 K 2024-11-23T19:36:25,037 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6aecd90eb3e41f8e9afc86fcf7aa6992 in 195ms, sequenceid=42, compaction requested=false 2024-11-23T19:36:25,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6aecd90eb3e41f8e9afc86fcf7aa6992: 2024-11-23T19:36:25,038 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-23T19:36:25,038 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:36:25,038 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/f8e64fb2bf384cc8a85fb3aaa922caa1 because midkey is the same as first or last row 2024-11-23T19:36:29,670 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6aecd90eb3e41f8e9afc86fcf7aa6992, had 
cached 0 bytes from a total of 40219 2024-11-23T19:36:32,855 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-23T19:36:32,856 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-23T19:36:32,857 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:36:32,864 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:36:32,865 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): 
Stopping rpc client 2024-11-23T19:36:32,865 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T19:36:32,865 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T19:36:32,865 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2125010042, stopped=false 2024-11-23T19:36:32,865 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=387b213c044a,38567,1732390494017 2024-11-23T19:36:32,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T19:36:32,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T19:36:32,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:32,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:32,928 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T19:36:32,928 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-23T19:36:32,929 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:36:32,929 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Set watcher on 
znode that does not yet exist, /hbase/running 2024-11-23T19:36:32,929 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:36:32,929 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:36:32,930 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '387b213c044a,43113,1732390495279' ***** 2024-11-23T19:36:32,930 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T19:36:32,931 INFO [RS:0;387b213c044a:43113 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T19:36:32,931 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T19:36:32,931 INFO [RS:0;387b213c044a:43113 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T19:36:32,932 INFO [RS:0;387b213c044a:43113 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T19:36:32,932 INFO [RS:0;387b213c044a:43113 {}] regionserver.HRegionServer(3091): Received CLOSE for 6aecd90eb3e41f8e9afc86fcf7aa6992 2024-11-23T19:36:32,933 INFO [RS:0;387b213c044a:43113 {}] regionserver.HRegionServer(959): stopping server 387b213c044a,43113,1732390495279 2024-11-23T19:36:32,933 INFO [RS:0;387b213c044a:43113 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T19:36:32,933 INFO [RS:0;387b213c044a:43113 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;387b213c044a:43113. 2024-11-23T19:36:32,934 DEBUG [RS:0;387b213c044a:43113 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:36:32,934 DEBUG [RS:0;387b213c044a:43113 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:36:32,934 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6aecd90eb3e41f8e9afc86fcf7aa6992, disabling compactions & flushes 2024-11-23T19:36:32,934 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992. 2024-11-23T19:36:32,934 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992. 2024-11-23T19:36:32,934 INFO [RS:0;387b213c044a:43113 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T19:36:32,934 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992. after waiting 0 ms 2024-11-23T19:36:32,934 INFO [RS:0;387b213c044a:43113 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T19:36:32,934 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992. 2024-11-23T19:36:32,934 INFO [RS:0;387b213c044a:43113 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T19:36:32,935 INFO [RS:0;387b213c044a:43113 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-23T19:36:32,935 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 6aecd90eb3e41f8e9afc86fcf7aa6992 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-23T19:36:32,935 INFO [RS:0;387b213c044a:43113 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-23T19:36:32,935 DEBUG [RS:0;387b213c044a:43113 {}] regionserver.HRegionServer(1325): Online Regions={6aecd90eb3e41f8e9afc86fcf7aa6992=TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992., 1588230740=hbase:meta,,1.1588230740} 2024-11-23T19:36:32,935 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T19:36:32,935 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T19:36:32,935 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T19:36:32,936 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T19:36:32,936 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T19:36:32,936 DEBUG [RS:0;387b213c044a:43113 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6aecd90eb3e41f8e9afc86fcf7aa6992 2024-11-23T19:36:32,936 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-23T19:36:32,941 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/.tmp/info/5b4c979d25bf4b679e1edcbb87943ebd is 1080, key is row0029/info:/1732390586845/Put/seqid=0 2024-11-23T19:36:32,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741850_1026 (size=8193) 2024-11-23T19:36:32,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741850_1026 (size=8193) 2024-11-23T19:36:32,949 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/.tmp/info/5b4c979d25bf4b679e1edcbb87943ebd 2024-11-23T19:36:32,956 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740/.tmp/info/d746ee7ff569489b9dd54801c7f18573 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992./info:regioninfo/1732390499754/Put/seqid=0 2024-11-23T19:36:32,958 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/.tmp/info/5b4c979d25bf4b679e1edcbb87943ebd as hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/5b4c979d25bf4b679e1edcbb87943ebd 2024-11-23T19:36:32,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741851_1027 (size=7016) 2024-11-23T19:36:32,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741851_1027 (size=7016) 2024-11-23T19:36:32,965 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740/.tmp/info/d746ee7ff569489b9dd54801c7f18573 2024-11-23T19:36:32,968 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/5b4c979d25bf4b679e1edcbb87943ebd, entries=3, sequenceid=48, filesize=8.0 K 2024-11-23T19:36:32,969 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 6aecd90eb3e41f8e9afc86fcf7aa6992 in 35ms, sequenceid=48, compaction requested=true 2024-11-23T19:36:32,970 DEBUG 
[StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/2eba31006fbe407288137d65f7214ef5, hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/dea1c2c52e6b4d39b92c4c15796eeec2, hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/5098162871014a6d97d3485213da62f3] to archive 2024-11-23T19:36:32,973 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T19:36:32,977 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/2eba31006fbe407288137d65f7214ef5 to hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/archive/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/2eba31006fbe407288137d65f7214ef5 2024-11-23T19:36:32,979 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/dea1c2c52e6b4d39b92c4c15796eeec2 to hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/archive/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/dea1c2c52e6b4d39b92c4c15796eeec2 2024-11-23T19:36:32,981 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/5098162871014a6d97d3485213da62f3 to hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/archive/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/info/5098162871014a6d97d3485213da62f3 2024-11-23T19:36:32,992 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740/.tmp/ns/5a41d7626dad4444aa888862c8def7b3 is 43, key is default/ns:d/1732390498684/Put/seqid=0 2024-11-23T19:36:32,992 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=387b213c044a:38567 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-23T19:36:32,997 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [2eba31006fbe407288137d65f7214ef5=12509, dea1c2c52e6b4d39b92c4c15796eeec2=12509, 5098162871014a6d97d3485213da62f3=12509] 2024-11-23T19:36:32,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741852_1028 (size=5153) 2024-11-23T19:36:32,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741852_1028 (size=5153) 2024-11-23T19:36:32,999 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740/.tmp/ns/5a41d7626dad4444aa888862c8def7b3 2024-11-23T19:36:33,002 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/default/TestLogRolling-testSlowSyncLogRolling/6aecd90eb3e41f8e9afc86fcf7aa6992/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-23T19:36:33,004 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992. 2024-11-23T19:36:33,004 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6aecd90eb3e41f8e9afc86fcf7aa6992: Waiting for close lock at 1732390592933Running coprocessor pre-close hooks at 1732390592934 (+1 ms)Disabling compacts and flushes for region at 1732390592934Disabling writes for close at 1732390592934Obtaining lock to block concurrent updates at 1732390592935 (+1 ms)Preparing flush snapshotting stores in 6aecd90eb3e41f8e9afc86fcf7aa6992 at 1732390592935Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732390592935Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992. 
at 1732390592936 (+1 ms)Flushing 6aecd90eb3e41f8e9afc86fcf7aa6992/info: creating writer at 1732390592936Flushing 6aecd90eb3e41f8e9afc86fcf7aa6992/info: appending metadata at 1732390592940 (+4 ms)Flushing 6aecd90eb3e41f8e9afc86fcf7aa6992/info: closing flushed file at 1732390592940Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a6eeab6: reopening flushed file at 1732390592957 (+17 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 6aecd90eb3e41f8e9afc86fcf7aa6992 in 35ms, sequenceid=48, compaction requested=true at 1732390592969 (+12 ms)Writing region close event to WAL at 1732390592998 (+29 ms)Running coprocessor post-close hooks at 1732390593003 (+5 ms)Closed at 1732390593004 (+1 ms) 2024-11-23T19:36:33,005 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732390498997.6aecd90eb3e41f8e9afc86fcf7aa6992. 2024-11-23T19:36:33,021 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740/.tmp/table/7555ce17543b4c1d83edcfb582481081 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732390499784/Put/seqid=0 2024-11-23T19:36:33,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741853_1029 (size=5396) 2024-11-23T19:36:33,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741853_1029 (size=5396) 2024-11-23T19:36:33,028 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740/.tmp/table/7555ce17543b4c1d83edcfb582481081 2024-11-23T19:36:33,036 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740/.tmp/info/d746ee7ff569489b9dd54801c7f18573 as hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740/info/d746ee7ff569489b9dd54801c7f18573 2024-11-23T19:36:33,045 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740/info/d746ee7ff569489b9dd54801c7f18573, entries=10, sequenceid=11, filesize=6.9 K 2024-11-23T19:36:33,046 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740/.tmp/ns/5a41d7626dad4444aa888862c8def7b3 as hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740/ns/5a41d7626dad4444aa888862c8def7b3 2024-11-23T19:36:33,055 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740/ns/5a41d7626dad4444aa888862c8def7b3, entries=2, sequenceid=11, filesize=5.0 K 2024-11-23T19:36:33,057 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740/.tmp/table/7555ce17543b4c1d83edcfb582481081 as hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740/table/7555ce17543b4c1d83edcfb582481081 2024-11-23T19:36:33,065 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740/table/7555ce17543b4c1d83edcfb582481081, entries=2, sequenceid=11, filesize=5.3 K 2024-11-23T19:36:33,066 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 130ms, sequenceid=11, compaction requested=false 2024-11-23T19:36:33,072 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-23T19:36:33,073 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T19:36:33,073 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T19:36:33,073 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732390592935Running coprocessor pre-close hooks at 1732390592935Disabling compacts and flushes for region at 1732390592935Disabling writes for close at 1732390592936 (+1 ms)Obtaining lock to block concurrent updates at 1732390592936Preparing flush snapshotting stores in 1588230740 at 1732390592936Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732390592936Flushing stores of hbase:meta,,1.1588230740 at 1732390592937 (+1 ms)Flushing 1588230740/info: creating writer at 1732390592937Flushing 1588230740/info: appending metadata at 1732390592955 (+18 ms)Flushing 1588230740/info: closing flushed file at 1732390592955Flushing 1588230740/ns: creating writer at 1732390592973 (+18 ms)Flushing 1588230740/ns: appending metadata at 1732390592991 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1732390592991Flushing 1588230740/table: creating writer at 1732390593006 (+15 ms)Flushing 1588230740/table: appending metadata at 1732390593021 (+15 ms)Flushing 1588230740/table: closing flushed file at 1732390593021Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57702062: reopening flushed file at 1732390593035 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6317d282: reopening flushed file at 1732390593045 (+10 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c22e5d4: reopening flushed file at 1732390593055 (+10 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 130ms, sequenceid=11, compaction requested=false at 1732390593067 (+12 ms)Writing region close event to WAL at 1732390593068 (+1 ms)Running coprocessor post-close hooks at 1732390593073 (+5 ms)Closed at 1732390593073 2024-11-23T19:36:33,073 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T19:36:33,136 INFO [RS:0;387b213c044a:43113 {}] regionserver.HRegionServer(976): stopping server 387b213c044a,43113,1732390495279; all regions closed. 2024-11-23T19:36:33,140 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:33,140 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:33,140 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:33,141 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:33,141 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:33,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741834_1010 (size=3066) 2024-11-23T19:36:33,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741834_1010 (size=3066) 2024-11-23T19:36:33,151 DEBUG [RS:0;387b213c044a:43113 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/oldWALs 2024-11-23T19:36:33,151 INFO [RS:0;387b213c044a:43113 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 387b213c044a%2C43113%2C1732390495279.meta:.meta(num 1732390498415) 2024-11-23T19:36:33,152 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:33,152 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:33,152 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:33,153 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:33,153 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:33,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741847_1023 (size=12695) 2024-11-23T19:36:33,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741847_1023 (size=12695) 2024-11-23T19:36:33,161 DEBUG [RS:0;387b213c044a:43113 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/oldWALs 2024-11-23T19:36:33,161 INFO [RS:0;387b213c044a:43113 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 387b213c044a%2C43113%2C1732390495279:(num 1732390572802) 2024-11-23T19:36:33,161 DEBUG [RS:0;387b213c044a:43113 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:36:33,161 INFO [RS:0;387b213c044a:43113 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T19:36:33,161 INFO [RS:0;387b213c044a:43113 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T19:36:33,162 INFO [RS:0;387b213c044a:43113 {}] hbase.ChoreService(370): Chore service for: regionserver/387b213c044a:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T19:36:33,162 INFO [RS:0;387b213c044a:43113 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T19:36:33,162 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T19:36:33,163 INFO [RS:0;387b213c044a:43113 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43113 2024-11-23T19:36:33,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/387b213c044a,43113,1732390495279 2024-11-23T19:36:33,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T19:36:33,178 INFO [RS:0;387b213c044a:43113 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T19:36:33,179 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [387b213c044a,43113,1732390495279] 2024-11-23T19:36:33,200 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/387b213c044a,43113,1732390495279 already deleted, retry=false 2024-11-23T19:36:33,201 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 387b213c044a,43113,1732390495279 expired; onlineServers=0 2024-11-23T19:36:33,201 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '387b213c044a,38567,1732390494017' ***** 2024-11-23T19:36:33,201 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T19:36:33,202 INFO [M:0;387b213c044a:38567 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T19:36:33,202 INFO [M:0;387b213c044a:38567 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T19:36:33,202 DEBUG [M:0;387b213c044a:38567 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T19:36:33,203 DEBUG [M:0;387b213c044a:38567 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T19:36:33,203 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
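Everything from the region close at 19:36:32,934 down to the master cleaners stopping here is driven by the test's JUnit teardown: the call stack recorded at 19:36:32,929 runs from AbstractTestLogRolling.tearDown through HBaseTestingUtil.shutdownMiniCluster into LocalHBaseCluster.shutdown and HMaster.shutdown. A minimal sketch of that teardown shape, assuming the standard HBase 3.x test-utility API (only shutdownMiniCluster and the HBaseTestingUtil class name appear in the logged stack; the no-arg constructor, startMiniCluster and the sketch class name are assumptions, not taken from the test source):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class MiniClusterTeardownSketch {
      // One utility instance per test class; it owns the mini ZooKeeper, DFS and HBase clusters.
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Starts mini ZooKeeper, mini DFS and a single master + region server.
        testUtil.startMiniCluster();
      }

      @After
      public void tearDown() throws Exception {
        // Produces the cascade in this log: region flush/close, WAL archival to oldWALs,
        // region server stop, master stop, then DFS and ZooKeeper shutdown.
        testUtil.shutdownMiniCluster();
      }
    }
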
2024-11-23T19:36:33,203 DEBUG [master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390497514 {}] cleaner.HFileCleaner(306): Exit Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390497514,5,FailOnTimeoutGroup] 2024-11-23T19:36:33,203 DEBUG [master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390497517 {}] cleaner.HFileCleaner(306): Exit Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390497517,5,FailOnTimeoutGroup] 2024-11-23T19:36:33,203 INFO [M:0;387b213c044a:38567 {}] hbase.ChoreService(370): Chore service for: master/387b213c044a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-23T19:36:33,203 INFO [M:0;387b213c044a:38567 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T19:36:33,204 DEBUG [M:0;387b213c044a:38567 {}] master.HMaster(1795): Stopping service threads 2024-11-23T19:36:33,204 INFO [M:0;387b213c044a:38567 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T19:36:33,204 INFO [M:0;387b213c044a:38567 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T19:36:33,205 INFO [M:0;387b213c044a:38567 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T19:36:33,205 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-23T19:36:33,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T19:36:33,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:33,211 DEBUG [M:0;387b213c044a:38567 {}] zookeeper.ZKUtil(347): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T19:36:33,211 WARN [M:0;387b213c044a:38567 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T19:36:33,213 INFO [M:0;387b213c044a:38567 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/.lastflushedseqids 2024-11-23T19:36:33,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741854_1030 (size=130) 2024-11-23T19:36:33,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741854_1030 (size=130) 2024-11-23T19:36:33,227 INFO [M:0;387b213c044a:38567 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-23T19:36:33,227 INFO [M:0;387b213c044a:38567 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T19:36:33,228 DEBUG [M:0;387b213c044a:38567 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T19:36:33,228 INFO [M:0;387b213c044a:38567 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:36:33,228 DEBUG [M:0;387b213c044a:38567 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:36:33,228 DEBUG [M:0;387b213c044a:38567 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T19:36:33,228 DEBUG [M:0;387b213c044a:38567 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:36:33,228 INFO [M:0;387b213c044a:38567 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-23T19:36:33,246 DEBUG [M:0;387b213c044a:38567 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4add1cc554334f48bf3cb1aaf84ef293 is 82, key is hbase:meta,,1/info:regioninfo/1732390498530/Put/seqid=0 2024-11-23T19:36:33,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741855_1031 (size=5672) 2024-11-23T19:36:33,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741855_1031 (size=5672) 2024-11-23T19:36:33,252 INFO [M:0;387b213c044a:38567 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4add1cc554334f48bf3cb1aaf84ef293 2024-11-23T19:36:33,275 DEBUG [M:0;387b213c044a:38567 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f875e87faf8e437ba8f48047eb441ab5 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732390499796/Put/seqid=0 2024-11-23T19:36:33,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741856_1032 (size=6247) 2024-11-23T19:36:33,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741856_1032 (size=6247) 2024-11-23T19:36:33,281 INFO [M:0;387b213c044a:38567 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f875e87faf8e437ba8f48047eb441ab5 2024-11-23T19:36:33,288 INFO [M:0;387b213c044a:38567 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f875e87faf8e437ba8f48047eb441ab5 2024-11-23T19:36:33,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:36:33,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43113-0x101693118c90001, quorum=127.0.0.1:58400, baseZNode=/hbase 
Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:36:33,291 INFO [RS:0;387b213c044a:43113 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T19:36:33,291 INFO [RS:0;387b213c044a:43113 {}] regionserver.HRegionServer(1031): Exiting; stopping=387b213c044a,43113,1732390495279; zookeeper connection closed. 2024-11-23T19:36:33,291 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@470ebd4c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@470ebd4c 2024-11-23T19:36:33,292 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-23T19:36:33,305 DEBUG [M:0;387b213c044a:38567 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7a3d680ea9ef41d2aeb035c023b3408d is 69, key is 387b213c044a,43113,1732390495279/rs:state/1732390497636/Put/seqid=0 2024-11-23T19:36:33,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741857_1033 (size=5156) 2024-11-23T19:36:33,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741857_1033 (size=5156) 2024-11-23T19:36:33,313 INFO [M:0;387b213c044a:38567 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7a3d680ea9ef41d2aeb035c023b3408d 2024-11-23T19:36:33,337 DEBUG [M:0;387b213c044a:38567 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/17b9cb48d8ae4ed5bf97b607fe6492d5 is 52, key is load_balancer_on/state:d/1732390498968/Put/seqid=0 2024-11-23T19:36:33,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741858_1034 (size=5056) 2024-11-23T19:36:33,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741858_1034 (size=5056) 2024-11-23T19:36:33,346 INFO [M:0;387b213c044a:38567 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/17b9cb48d8ae4ed5bf97b607fe6492d5 2024-11-23T19:36:33,355 DEBUG [M:0;387b213c044a:38567 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4add1cc554334f48bf3cb1aaf84ef293 as hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4add1cc554334f48bf3cb1aaf84ef293 2024-11-23T19:36:33,363 INFO [M:0;387b213c044a:38567 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4add1cc554334f48bf3cb1aaf84ef293, entries=8, sequenceid=59, filesize=5.5 K 2024-11-23T19:36:33,365 DEBUG [M:0;387b213c044a:38567 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f875e87faf8e437ba8f48047eb441ab5 as hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f875e87faf8e437ba8f48047eb441ab5 2024-11-23T19:36:33,373 INFO [M:0;387b213c044a:38567 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f875e87faf8e437ba8f48047eb441ab5 2024-11-23T19:36:33,373 INFO [M:0;387b213c044a:38567 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f875e87faf8e437ba8f48047eb441ab5, entries=6, sequenceid=59, filesize=6.1 K 2024-11-23T19:36:33,374 DEBUG [M:0;387b213c044a:38567 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7a3d680ea9ef41d2aeb035c023b3408d as hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7a3d680ea9ef41d2aeb035c023b3408d 2024-11-23T19:36:33,382 INFO [M:0;387b213c044a:38567 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7a3d680ea9ef41d2aeb035c023b3408d, entries=1, sequenceid=59, filesize=5.0 K 2024-11-23T19:36:33,383 DEBUG [M:0;387b213c044a:38567 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/17b9cb48d8ae4ed5bf97b607fe6492d5 as hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/17b9cb48d8ae4ed5bf97b607fe6492d5 2024-11-23T19:36:33,396 INFO [M:0;387b213c044a:38567 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/17b9cb48d8ae4ed5bf97b607fe6492d5, entries=1, sequenceid=59, filesize=4.9 K 2024-11-23T19:36:33,398 INFO [M:0;387b213c044a:38567 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 170ms, sequenceid=59, compaction requested=false 2024-11-23T19:36:33,402 INFO [M:0;387b213c044a:38567 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-23T19:36:33,402 DEBUG [M:0;387b213c044a:38567 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732390593227Disabling compacts and flushes for region at 1732390593227Disabling writes for close at 1732390593228 (+1 ms)Obtaining lock to block concurrent updates at 1732390593228Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732390593228Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1732390593228Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732390593229 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732390593229Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732390593246 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732390593246Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732390593258 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732390593274 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732390593274Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732390593289 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732390593305 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732390593305Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732390593320 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732390593336 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732390593336Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5cd90010: reopening flushed file at 1732390593353 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c8c5a9a: reopening flushed file at 1732390593363 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c4f355f: reopening flushed file at 1732390593373 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63893b45: reopening flushed file at 1732390593382 (+9 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 170ms, sequenceid=59, compaction requested=false at 1732390593398 (+16 ms)Writing region close event to WAL at 1732390593402 (+4 ms)Closed at 1732390593402 2024-11-23T19:36:33,403 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:33,403 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:33,403 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:33,404 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:33,404 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:33,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45339 is added to blk_1073741830_1006 (size=27973) 2024-11-23T19:36:33,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741830_1006 (size=27973) 2024-11-23T19:36:33,408 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
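With the master's local store flushed and its WAL roller gone, the master stops its RPC server and ZooKeeper session below. It only reaches this point because the cluster-shutdown flag was raised earlier ("STOPPED: Cluster shutdown set; onlineServer=0"); in this test the flag comes from JVMClusterUtil/HMaster.shutdown, but the same path is what the public client API exposes. A hedged sketch of triggering that shutdown from client code, assuming the stock Admin API (ConnectionFactory.createConnection, Admin.shutdown); nothing here is taken from the test itself:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class ClusterShutdownSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // try-with-resources closes both resources; the "Connection has been closed by ..."
        // traces earlier in this log are the servers' own cluster connections going through
        // the same AsyncConnectionImpl.close() path.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Asks the active master to shut the whole cluster down; the master then sets
          // the shutdown flag and the region servers stop, as logged above.
          admin.shutdown();
        }
      }
    }
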
2024-11-23T19:36:33,408 INFO [M:0;387b213c044a:38567 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-23T19:36:33,409 INFO [M:0;387b213c044a:38567 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38567 2024-11-23T19:36:33,409 INFO [M:0;387b213c044a:38567 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T19:36:33,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:36:33,522 INFO [M:0;387b213c044a:38567 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T19:36:33,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x101693118c90000, quorum=127.0.0.1:58400, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:36:33,527 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:36:33,530 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:36:33,530 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:36:33,530 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:36:33,530 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/hadoop.log.dir/,STOPPED} 2024-11-23T19:36:33,534 WARN [BP-601919595-172.17.0.3-1732390489657 heartbeating to localhost/127.0.0.1:35281 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:36:33,534 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
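From here on the log is the HDFS/ZooKeeper side of the same shutdownMiniCluster call: the two datanodes' Jetty endpoints stop, their block-pool and Command processor threads are interrupted, and later the namenode web UI and the mini ZooKeeper cluster go down ("Minicluster is down"). HBaseTestingUtil drives this internally; the following is only a hedged sketch of the underlying Hadoop test API it wraps, assuming the stock MiniDFSCluster builder (the class name and the two-datanode count are illustrative, inferred from the two BP-601919595-... heartbeat threads in this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public final class MiniDfsLifecycleSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Two datanodes, matching the two block-pool heartbeat threads seen above.
        MiniDFSCluster dfs = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        try {
          dfs.waitActive();
          // ... exercise dfs.getFileSystem() here ...
        } finally {
          // Stops the datanodes and then the namenode, which is what emits the
          // "Ending block pool service" and Jetty "Stopped ..." lines around this point.
          dfs.shutdown();
        }
      }
    }
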
2024-11-23T19:36:33,534 WARN [BP-601919595-172.17.0.3-1732390489657 heartbeating to localhost/127.0.0.1:35281 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-601919595-172.17.0.3-1732390489657 (Datanode Uuid 07684f41-35f0-44c2-aa43-e9f17a039632) service to localhost/127.0.0.1:35281 2024-11-23T19:36:33,534 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:36:33,536 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/cluster_4591f455-85e2-8b21-e172-08dc0300edb1/data/data3/current/BP-601919595-172.17.0.3-1732390489657 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:36:33,536 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/cluster_4591f455-85e2-8b21-e172-08dc0300edb1/data/data4/current/BP-601919595-172.17.0.3-1732390489657 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:36:33,537 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:36:33,549 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:36:33,549 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:36:33,550 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:36:33,550 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:36:33,550 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/hadoop.log.dir/,STOPPED} 2024-11-23T19:36:33,552 WARN [BP-601919595-172.17.0.3-1732390489657 heartbeating to localhost/127.0.0.1:35281 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:36:33,552 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T19:36:33,552 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:36:33,552 WARN [BP-601919595-172.17.0.3-1732390489657 heartbeating to localhost/127.0.0.1:35281 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-601919595-172.17.0.3-1732390489657 (Datanode Uuid 404f4ad1-b202-464b-a485-35b09c1fe890) service to localhost/127.0.0.1:35281 2024-11-23T19:36:33,552 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/cluster_4591f455-85e2-8b21-e172-08dc0300edb1/data/data1/current/BP-601919595-172.17.0.3-1732390489657 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:36:33,553 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/cluster_4591f455-85e2-8b21-e172-08dc0300edb1/data/data2/current/BP-601919595-172.17.0.3-1732390489657 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:36:33,553 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:36:33,784 INFO [regionserver/387b213c044a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T19:36:34,535 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T19:36:34,535 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T19:36:34,537 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-23T19:36:34,538 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-23T19:36:35,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T19:36:35,093 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:36:35,094 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:36:35,094 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:36:35,094 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/hadoop.log.dir/,STOPPED} 2024-11-23T19:36:35,108 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-23T19:36:35,165 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-23T19:36:35,186 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35281 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35281 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35281 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35281 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: master/387b213c044a:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/387b213c044a:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35281 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35281 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: regionserver/387b213c044a:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:35281 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: 
org.apache.hadoop.hdfs.PeerCache@5f2d9ff8 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35281 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1009 (was 899) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=2525 (was 4472) 2024-11-23T19:36:35,195 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=1009, ProcessCount=11, AvailableMemoryMB=2523 2024-11-23T19:36:35,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-23T19:36:35,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/hadoop.log.dir so I do NOT create it in target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0 2024-11-23T19:36:35,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d5564dd1-07b7-6649-49db-a8810bfe43a1/hadoop.tmp.dir so I do NOT create it in target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0 2024-11-23T19:36:35,196 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/cluster_a98886a6-31e9-1aa1-9e01-0ea971b30812, deleteOnExit=true 2024-11-23T19:36:35,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-23T19:36:35,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/test.cache.data in system properties and HBase conf 2024-11-23T19:36:35,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T19:36:35,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/hadoop.log.dir in system properties and HBase conf 2024-11-23T19:36:35,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T19:36:35,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T19:36:35,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-23T19:36:35,197 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-23T19:36:35,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T19:36:35,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T19:36:35,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T19:36:35,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T19:36:35,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T19:36:35,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T19:36:35,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T19:36:35,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T19:36:35,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T19:36:35,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/nfs.dump.dir in system properties and HBase conf 2024-11-23T19:36:35,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/java.io.tmpdir in system properties and HBase conf 2024-11-23T19:36:35,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T19:36:35,200 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T19:36:35,200 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T19:36:35,221 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T19:36:35,685 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:36:35,700 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:36:35,728 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:36:35,728 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:36:35,729 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T19:36:35,737 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:36:35,744 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54b8bf96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:36:35,745 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@737d6115{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:36:35,915 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55cb1221{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/java.io.tmpdir/jetty-localhost-35909-hadoop-hdfs-3_4_1-tests_jar-_-any-3153495926681601032/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T19:36:35,916 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@542ee468{HTTP/1.1, (http/1.1)}{localhost:35909} 2024-11-23T19:36:35,916 INFO [Time-limited test {}] server.Server(415): Started @108447ms 2024-11-23T19:36:35,935 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T19:36:36,374 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:36:36,381 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:36:36,396 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:36:36,396 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:36:36,397 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T19:36:36,401 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61783b0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:36:36,401 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b58749b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:36:36,537 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4595827f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/java.io.tmpdir/jetty-localhost-36843-hadoop-hdfs-3_4_1-tests_jar-_-any-17438675348012884796/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:36:36,537 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3b01355c{HTTP/1.1, (http/1.1)}{localhost:36843} 2024-11-23T19:36:36,538 INFO [Time-limited test {}] server.Server(415): Started @109068ms 2024-11-23T19:36:36,540 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:36:36,736 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:36:36,764 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:36:36,777 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:36:36,777 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:36:36,777 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T19:36:36,780 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6082dc4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:36:36,781 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a742c1f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:36:36,911 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@da5059a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/java.io.tmpdir/jetty-localhost-38933-hadoop-hdfs-3_4_1-tests_jar-_-any-7144158177708140635/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:36:36,911 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2220be00{HTTP/1.1, (http/1.1)}{localhost:38933} 2024-11-23T19:36:36,911 INFO [Time-limited test {}] server.Server(415): Started @109442ms 2024-11-23T19:36:36,913 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:36:37,629 WARN [Thread-451 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/cluster_a98886a6-31e9-1aa1-9e01-0ea971b30812/data/data1/current/BP-2131376814-172.17.0.3-1732390595240/current, will proceed with Du for space computation calculation, 2024-11-23T19:36:37,629 WARN [Thread-452 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/cluster_a98886a6-31e9-1aa1-9e01-0ea971b30812/data/data2/current/BP-2131376814-172.17.0.3-1732390595240/current, will proceed with Du for space computation calculation, 2024-11-23T19:36:37,655 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T19:36:37,661 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x679b601db25180d8 with lease ID 0x855a0dc46836f9c3: Processing first storage report for DS-f1605499-6dad-4d8f-8b17-e22114c53933 from datanode DatanodeRegistration(127.0.0.1:44095, datanodeUuid=bcecba81-64f0-420b-94c4-a2cab477d4fb, infoPort=33985, infoSecurePort=0, ipcPort=41147, storageInfo=lv=-57;cid=testClusterID;nsid=1599898198;c=1732390595240) 2024-11-23T19:36:37,661 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x679b601db25180d8 with lease ID 0x855a0dc46836f9c3: from storage DS-f1605499-6dad-4d8f-8b17-e22114c53933 node DatanodeRegistration(127.0.0.1:44095, datanodeUuid=bcecba81-64f0-420b-94c4-a2cab477d4fb, infoPort=33985, infoSecurePort=0, ipcPort=41147, storageInfo=lv=-57;cid=testClusterID;nsid=1599898198;c=1732390595240), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:36:37,661 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x679b601db25180d8 with lease ID 0x855a0dc46836f9c3: Processing first storage report for DS-cc8af065-d68c-4d36-b197-8f7270256820 from datanode DatanodeRegistration(127.0.0.1:44095, datanodeUuid=bcecba81-64f0-420b-94c4-a2cab477d4fb, infoPort=33985, infoSecurePort=0, ipcPort=41147, storageInfo=lv=-57;cid=testClusterID;nsid=1599898198;c=1732390595240) 2024-11-23T19:36:37,662 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x679b601db25180d8 with lease ID 0x855a0dc46836f9c3: from storage DS-cc8af065-d68c-4d36-b197-8f7270256820 node DatanodeRegistration(127.0.0.1:44095, datanodeUuid=bcecba81-64f0-420b-94c4-a2cab477d4fb, infoPort=33985, infoSecurePort=0, ipcPort=41147, storageInfo=lv=-57;cid=testClusterID;nsid=1599898198;c=1732390595240), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:36:38,007 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:36:38,017 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:36:38,054 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/cluster_a98886a6-31e9-1aa1-9e01-0ea971b30812/data/data3/current/BP-2131376814-172.17.0.3-1732390595240/current, will proceed with Du for space computation calculation, 2024-11-23T19:36:38,054 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/cluster_a98886a6-31e9-1aa1-9e01-0ea971b30812/data/data4/current/BP-2131376814-172.17.0.3-1732390595240/current, will proceed with Du for space computation calculation, 2024-11-23T19:36:38,075 WARN [Thread-439 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T19:36:38,077 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6b9fbd63ca20cda with lease ID 0x855a0dc46836f9c4: Processing first storage report for DS-dc5301d0-7f35-4b22-9f49-0e2a1a16fda9 from datanode DatanodeRegistration(127.0.0.1:39887, datanodeUuid=53432c00-be6e-4f4f-9ab5-dd67ff79e9fa, infoPort=38935, infoSecurePort=0, ipcPort=34111, storageInfo=lv=-57;cid=testClusterID;nsid=1599898198;c=1732390595240) 2024-11-23T19:36:38,077 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6b9fbd63ca20cda with lease ID 0x855a0dc46836f9c4: from storage DS-dc5301d0-7f35-4b22-9f49-0e2a1a16fda9 node DatanodeRegistration(127.0.0.1:39887, datanodeUuid=53432c00-be6e-4f4f-9ab5-dd67ff79e9fa, infoPort=38935, infoSecurePort=0, ipcPort=34111, storageInfo=lv=-57;cid=testClusterID;nsid=1599898198;c=1732390595240), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:36:38,077 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6b9fbd63ca20cda with lease ID 0x855a0dc46836f9c4: Processing first storage report for DS-f08f3018-812b-45c8-bf5a-e4dbb5811c55 from datanode DatanodeRegistration(127.0.0.1:39887, datanodeUuid=53432c00-be6e-4f4f-9ab5-dd67ff79e9fa, infoPort=38935, infoSecurePort=0, ipcPort=34111, storageInfo=lv=-57;cid=testClusterID;nsid=1599898198;c=1732390595240) 2024-11-23T19:36:38,077 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6b9fbd63ca20cda with lease ID 0x855a0dc46836f9c4: from storage DS-f08f3018-812b-45c8-bf5a-e4dbb5811c55 node DatanodeRegistration(127.0.0.1:39887, datanodeUuid=53432c00-be6e-4f4f-9ab5-dd67ff79e9fa, infoPort=38935, infoSecurePort=0, ipcPort=34111, storageInfo=lv=-57;cid=testClusterID;nsid=1599898198;c=1732390595240), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:36:38,152 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0 2024-11-23T19:36:38,155 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/cluster_a98886a6-31e9-1aa1-9e01-0ea971b30812/zookeeper_0, clientPort=53621, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/cluster_a98886a6-31e9-1aa1-9e01-0ea971b30812/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/cluster_a98886a6-31e9-1aa1-9e01-0ea971b30812/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T19:36:38,156 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53621 2024-11-23T19:36:38,156 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:36:38,158 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:36:38,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39887 is added to blk_1073741825_1001 (size=7) 2024-11-23T19:36:38,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44095 is added to blk_1073741825_1001 (size=7) 2024-11-23T19:36:38,170 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0 with version=8 2024-11-23T19:36:38,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/hbase-staging 2024-11-23T19:36:38,172 INFO [Time-limited test {}] client.ConnectionUtils(128): master/387b213c044a:0 server-side Connection retries=45 2024-11-23T19:36:38,172 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:36:38,173 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T19:36:38,173 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T19:36:38,173 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:36:38,173 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T19:36:38,173 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-23T19:36:38,173 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T19:36:38,174 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37181 2024-11-23T19:36:38,177 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37181 connecting to ZooKeeper ensemble=127.0.0.1:53621 2024-11-23T19:36:38,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:371810x0, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T19:36:38,223 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37181-0x1016932b1b60000 connected 2024-11-23T19:36:38,324 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:36:38,330 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:36:38,337 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:36:38,337 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0, hbase.cluster.distributed=false 2024-11-23T19:36:38,339 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T19:36:38,340 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37181 2024-11-23T19:36:38,340 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37181 2024-11-23T19:36:38,340 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37181 2024-11-23T19:36:38,341 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37181 2024-11-23T19:36:38,341 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37181 2024-11-23T19:36:38,359 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/387b213c044a:0 server-side Connection retries=45 2024-11-23T19:36:38,359 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:36:38,359 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T19:36:38,359 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T19:36:38,360 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:36:38,360 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T19:36:38,360 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T19:36:38,360 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T19:36:38,361 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34323 2024-11-23T19:36:38,362 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34323 connecting to ZooKeeper ensemble=127.0.0.1:53621 2024-11-23T19:36:38,363 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:36:38,365 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:36:38,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:343230x0, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T19:36:38,408 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:343230x0, quorum=127.0.0.1:53621, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:36:38,408 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34323-0x1016932b1b60001 connected 2024-11-23T19:36:38,408 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T19:36:38,409 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T19:36:38,409 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T19:36:38,411 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T19:36:38,411 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34323 2024-11-23T19:36:38,411 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34323 2024-11-23T19:36:38,411 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34323 2024-11-23T19:36:38,412 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34323 2024-11-23T19:36:38,414 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34323 2024-11-23T19:36:38,430 DEBUG [M:0;387b213c044a:37181 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;387b213c044a:37181 2024-11-23T19:36:38,431 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/387b213c044a,37181,1732390598172 2024-11-23T19:36:38,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:36:38,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:36:38,439 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/387b213c044a,37181,1732390598172 2024-11-23T19:36:38,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T19:36:38,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:38,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:38,450 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T19:36:38,450 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/387b213c044a,37181,1732390598172 from backup master directory 2024-11-23T19:36:38,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:36:38,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/387b213c044a,37181,1732390598172 2024-11-23T19:36:38,460 WARN [master/387b213c044a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
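Context note: the entries above show the master's RecoverableZooKeeper session (ensemble 127.0.0.1:53621, baseZNode=/hbase) registering itself under /hbase/backup-masters and setting watchers on /hbase/master. A minimal sketch of inspecting those same znodes from a test with the plain ZooKeeper client; the client port and paths are the ones printed in this run, while the session timeout and the no-op watcher are assumptions:

import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class InspectHBaseZNodes {
  public static void main(String[] args) throws Exception {
    // Connect to the MiniZooKeeperCluster client port reported earlier in this run (53621).
    ZooKeeper zk = new ZooKeeper("127.0.0.1:53621", 30000, event -> { /* ignore watch events */ });
    try {
      // /hbase/master appears once a master wins the active-master race (NodeCreated above).
      System.out.println("active master znode stat: " + zk.exists("/hbase/master", false));
      // Backup masters register ephemeral children under /hbase/backup-masters.
      List<String> backups = zk.getChildren("/hbase/backup-masters", false);
      System.out.println("backup masters: " + backups);
    } finally {
      zk.close();
    }
  }
}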
2024-11-23T19:36:38,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:36:38,460 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=387b213c044a,37181,1732390598172 2024-11-23T19:36:38,465 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/hbase.id] with ID: e5039eea-e9ea-4763-9eaa-3ba3380d7e30 2024-11-23T19:36:38,465 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/.tmp/hbase.id 2024-11-23T19:36:38,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44095 is added to blk_1073741826_1002 (size=42) 2024-11-23T19:36:38,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39887 is added to blk_1073741826_1002 (size=42) 2024-11-23T19:36:38,474 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/.tmp/hbase.id]:[hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/hbase.id] 2024-11-23T19:36:38,490 INFO [master/387b213c044a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:36:38,490 INFO [master/387b213c044a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-23T19:36:38,492 INFO [master/387b213c044a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
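Context note: the FSUtils entries above write the cluster ID to a temporary path (.tmp/hbase.id) and only then move it to its final location, so readers never observe a half-written file. A minimal sketch of that write-then-rename pattern with the Hadoop FileSystem API; the local filesystem and the /tmp/demo paths are placeholders, not the HDFS paths used by this run:

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteThenRename {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);            // stand-in for the hdfs://localhost:36231 instance above
    Path tmp = new Path("/tmp/demo/.tmp/hbase.id");        // hypothetical temporary location
    Path dst = new Path("/tmp/demo/hbase.id");             // hypothetical final location
    try (FSDataOutputStream out = fs.create(tmp, true)) {  // write the complete content to the temp file first
      out.write("e5039eea-e9ea-4763-9eaa-3ba3380d7e30".getBytes(StandardCharsets.UTF_8));
    }
    // Publish by rename: only a fully written file ever appears at the final path.
    if (!fs.rename(tmp, dst)) {
      throw new IllegalStateException("rename failed: " + tmp + " -> " + dst);
    }
  }
}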
2024-11-23T19:36:38,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:38,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:38,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44095 is added to blk_1073741827_1003 (size=196) 2024-11-23T19:36:38,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39887 is added to blk_1073741827_1003 (size=196) 2024-11-23T19:36:38,509 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T19:36:38,510 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-23T19:36:38,511 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:36:38,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44095 is added to blk_1073741828_1004 (size=1189) 2024-11-23T19:36:38,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39887 is added to blk_1073741828_1004 (size=1189) 2024-11-23T19:36:38,520 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store 2024-11-23T19:36:38,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44095 is added to blk_1073741829_1005 (size=34) 2024-11-23T19:36:38,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39887 is added to blk_1073741829_1005 (size=34) 2024-11-23T19:36:38,529 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:36:38,529 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T19:36:38,529 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:36:38,529 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:36:38,529 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T19:36:38,529 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:36:38,529 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
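Context note: the descriptor logged above defines four column families (info, proc, rs, state) with the block sizes, bloom filters and encodings shown. A rough sketch of building a comparable descriptor with the public HBase client API; the table name is a placeholder, since master:store is an internal table the master creates for itself rather than something a test would create, and only the 'info' family's options are spelled out here:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreLikeDescriptor {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store"))
        // 'info': 8 KB blocks, ROWCOL bloom, ROW_INDEX_V1 encoding, in-memory, 3 versions (as logged)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build())
        // 'proc', 'rs', 'state': defaults close to what the log shows (64 KB blocks, ROW bloom)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
  }
}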
2024-11-23T19:36:38,529 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732390598529Disabling compacts and flushes for region at 1732390598529Disabling writes for close at 1732390598529Writing region close event to WAL at 1732390598529Closed at 1732390598529 2024-11-23T19:36:38,530 WARN [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/.initializing 2024-11-23T19:36:38,531 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/WALs/387b213c044a,37181,1732390598172 2024-11-23T19:36:38,532 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T19:36:38,535 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C37181%2C1732390598172, suffix=, logDir=hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/WALs/387b213c044a,37181,1732390598172, archiveDir=hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/oldWALs, maxLogs=10 2024-11-23T19:36:38,535 INFO [master/387b213c044a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C37181%2C1732390598172.1732390598535 2024-11-23T19:36:38,547 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/WALs/387b213c044a,37181,1732390598172/387b213c044a%2C37181%2C1732390598172.1732390598535 2024-11-23T19:36:38,551 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38935:38935),(127.0.0.1/127.0.0.1:33985:33985)] 2024-11-23T19:36:38,551 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:36:38,553 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T19:36:38,554 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:36:38,554 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:38,554 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:38,556 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:38,558 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T19:36:38,558 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:38,559 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:36:38,559 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:38,561 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T19:36:38,561 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:38,562 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:36:38,562 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:38,565 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T19:36:38,565 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:38,566 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:36:38,566 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:38,568 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T19:36:38,568 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:38,568 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:36:38,568 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:38,569 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:38,570 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:38,571 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:38,571 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:38,572 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
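Context note: the "32.0 M" in the FlushLargeStoresPolicy entry above follows from the flush size injected earlier (flushSize=134217728) divided by the four column families of master:store; the resulting flushSizeLowerBound=33554432 is printed a few entries below. A one-line check of that arithmetic:

public class FlushLowerBound {
  public static void main(String[] args) {
    long memstoreFlushSize = 134_217_728L;  // flushSize logged by MasterRegionFlusherAndCompactor (128 MB)
    int numFamilies = 4;                    // info, proc, rs, state
    long lowerBound = memstoreFlushSize / numFamilies;
    System.out.println(lowerBound);         // 33554432 bytes = 32 MB
  }
}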
2024-11-23T19:36:38,573 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:38,577 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T19:36:38,577 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=708034, jitterRate=-0.09968927502632141}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T19:36:38,579 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732390598554Initializing all the Stores at 1732390598555 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390598555Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390598556 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390598556Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390598556Cleaning up temporary data from old regions at 1732390598571 (+15 ms)Region opened successfully at 1732390598578 (+7 ms) 2024-11-23T19:36:38,579 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T19:36:38,583 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bc56628, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=387b213c044a/172.17.0.3:0 2024-11-23T19:36:38,584 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
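Context note: the seqid handoff above is easy to miss: the freshly written marker file records newMaxSeqId=1 (maxSeqId=-1, since there are no store files yet), and the region then opens with next sequenceid=2, one past the highest sequence id seen. A tiny sketch of that relationship as suggested by these two entries; the variable names are mine, not HBase's:

public class NextSequenceId {
  public static void main(String[] args) {
    long maxSeqIdFromStores = -1L;  // maxSeqId=-1: no store files yet
    long maxSeqIdFromMarker = 1L;   // newMaxSeqId=1 from recovered.edits/1.seqid
    long nextSequenceId = Math.max(maxSeqIdFromStores, maxSeqIdFromMarker) + 1;
    System.out.println(nextSequenceId); // 2, as logged by "Opened ...; next sequenceid=2"
  }
}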
2024-11-23T19:36:38,584 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T19:36:38,584 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T19:36:38,584 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T19:36:38,585 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-23T19:36:38,585 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-23T19:36:38,585 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T19:36:38,589 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T19:36:38,590 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T19:36:38,596 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-23T19:36:38,597 INFO [master/387b213c044a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T19:36:38,597 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T19:36:38,607 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-23T19:36:38,607 INFO [master/387b213c044a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T19:36:38,608 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T19:36:38,617 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-23T19:36:38,619 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T19:36:38,628 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T19:36:38,631 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T19:36:38,638 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T19:36:38,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T19:36:38,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T19:36:38,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:38,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:38,650 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=387b213c044a,37181,1732390598172, sessionid=0x1016932b1b60000, setting cluster-up flag (Was=false) 2024-11-23T19:36:38,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:38,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:38,702 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T19:36:38,707 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=387b213c044a,37181,1732390598172 2024-11-23T19:36:38,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:38,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:38,765 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T19:36:38,767 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=387b213c044a,37181,1732390598172 2024-11-23T19:36:38,769 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at 
hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-23T19:36:38,771 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-23T19:36:38,772 INFO [master/387b213c044a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-23T19:36:38,772 INFO [master/387b213c044a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-23T19:36:38,772 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 387b213c044a,37181,1732390598172 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T19:36:38,775 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:36:38,775 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:36:38,775 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:36:38,775 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:36:38,775 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/387b213c044a:0, corePoolSize=10, maxPoolSize=10 2024-11-23T19:36:38,775 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:38,776 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/387b213c044a:0, corePoolSize=2, maxPoolSize=2 2024-11-23T19:36:38,776 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:38,777 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; 
timeout=30000, timestamp=1732390628777 2024-11-23T19:36:38,777 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T19:36:38,777 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T19:36:38,777 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T19:36:38,777 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T19:36:38,777 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T19:36:38,777 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T19:36:38,778 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:38,778 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T19:36:38,778 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:36:38,778 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T19:36:38,778 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-23T19:36:38,778 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T19:36:38,779 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T19:36:38,779 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T19:36:38,779 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390598779,5,FailOnTimeoutGroup] 2024-11-23T19:36:38,780 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390598779,5,FailOnTimeoutGroup] 2024-11-23T19:36:38,780 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:38,780 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T19:36:38,780 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
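The ChoreService lines above register recurring maintenance tasks (LogsCleaner every 600000 ms, HFileCleaner, ReplicationBarrierCleaner and so on) by name, period and time unit. As a rough illustration of that fixed-period model only, using plain java.util.concurrent rather than HBase's own ChoreService/ScheduledChore classes, and with a placeholder task body:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
    public static void main(String[] args) {
        // Single-threaded scheduler standing in for a chore pool.
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

        // Placeholder "LogsCleaner"-style chore: runs every 600000 ms (10 minutes),
        // matching the period/unit reported in the log above.
        Runnable logsCleanerChore = () -> System.out.println("cleaning old WALs (placeholder)");

        scheduler.scheduleAtFixedRate(logsCleanerChore, 0, 600_000, TimeUnit.MILLISECONDS);
    }
}
```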
2024-11-23T19:36:38,780 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:38,781 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:38,781 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T19:36:38,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39887 is added to blk_1073741831_1007 (size=1321) 2024-11-23T19:36:38,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44095 is added to blk_1073741831_1007 (size=1321) 2024-11-23T19:36:38,792 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-23T19:36:38,792 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0 2024-11-23T19:36:38,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44095 is added to blk_1073741832_1008 (size=32) 2024-11-23T19:36:38,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39887 is added to blk_1073741832_1008 (size=32) 2024-11-23T19:36:38,805 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:36:38,806 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T19:36:38,808 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T19:36:38,808 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:38,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:36:38,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T19:36:38,811 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T19:36:38,811 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:38,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:36:38,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T19:36:38,815 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T19:36:38,815 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:38,816 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:36:38,816 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T19:36:38,817 INFO [RS:0;387b213c044a:34323 {}] regionserver.HRegionServer(746): ClusterId : e5039eea-e9ea-4763-9eaa-3ba3380d7e30 2024-11-23T19:36:38,817 DEBUG [RS:0;387b213c044a:34323 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T19:36:38,819 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min 
locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T19:36:38,819 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:38,819 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:36:38,820 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T19:36:38,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/data/hbase/meta/1588230740 2024-11-23T19:36:38,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/data/hbase/meta/1588230740 2024-11-23T19:36:38,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T19:36:38,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T19:36:38,823 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
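The FSTableDescriptors/HRegion entries above spell out the hbase:meta column families (info, ns, rep_barrier, table) with attributes such as ROWCOL bloom filters, ROW_INDEX_V1 block encoding, in-memory caching and an 8 KB block size. The meta descriptor itself is built internally by the master, but the same attributes can be expressed for an ordinary table with the public client API; a minimal sketch (the 'demo' table name and 'info' family are made up for illustration):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
    public static TableDescriptor build() {
        // Column family mirroring the attributes logged for hbase:meta's 'info' family:
        // VERSIONS=3, BLOOMFILTER=ROWCOL, IN_MEMORY=true, BLOCKSIZE=8192, ROW_INDEX_V1 encoding.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8192)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .build())
            .build();
    }
}
```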
2024-11-23T19:36:38,825 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T19:36:38,828 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T19:36:38,829 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=793438, jitterRate=0.008909463882446289}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T19:36:38,830 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732390598805Initializing all the Stores at 1732390598806 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390598806Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390598806Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390598806Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390598806Cleaning up temporary data from old regions at 1732390598823 (+17 ms)Region opened successfully at 1732390598830 (+7 ms) 2024-11-23T19:36:38,830 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T19:36:38,831 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T19:36:38,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T19:36:38,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T19:36:38,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T19:36:38,832 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T19:36:38,832 DEBUG [RS:0;387b213c044a:34323 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T19:36:38,832 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for 
close lock at 1732390598830Disabling compacts and flushes for region at 1732390598830Disabling writes for close at 1732390598831 (+1 ms)Writing region close event to WAL at 1732390598832 (+1 ms)Closed at 1732390598832 2024-11-23T19:36:38,833 DEBUG [RS:0;387b213c044a:34323 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T19:36:38,834 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:36:38,834 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-23T19:36:38,835 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T19:36:38,837 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T19:36:38,838 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T19:36:38,841 DEBUG [RS:0;387b213c044a:34323 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T19:36:38,841 DEBUG [RS:0;387b213c044a:34323 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ab559e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=387b213c044a/172.17.0.3:0 2024-11-23T19:36:38,859 DEBUG [RS:0;387b213c044a:34323 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;387b213c044a:34323 2024-11-23T19:36:38,860 INFO [RS:0;387b213c044a:34323 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T19:36:38,860 INFO [RS:0;387b213c044a:34323 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T19:36:38,860 DEBUG [RS:0;387b213c044a:34323 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-23T19:36:38,861 INFO [RS:0;387b213c044a:34323 {}] regionserver.HRegionServer(2659): reportForDuty to master=387b213c044a,37181,1732390598172 with port=34323, startcode=1732390598359 2024-11-23T19:36:38,861 DEBUG [RS:0;387b213c044a:34323 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T19:36:38,864 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33791, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T19:36:38,865 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37181 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 387b213c044a,34323,1732390598359 2024-11-23T19:36:38,865 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37181 {}] master.ServerManager(517): Registering regionserver=387b213c044a,34323,1732390598359 2024-11-23T19:36:38,868 DEBUG [RS:0;387b213c044a:34323 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0 2024-11-23T19:36:38,868 DEBUG [RS:0;387b213c044a:34323 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36231 2024-11-23T19:36:38,868 DEBUG [RS:0;387b213c044a:34323 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T19:36:38,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T19:36:38,881 DEBUG [RS:0;387b213c044a:34323 {}] zookeeper.ZKUtil(111): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/387b213c044a,34323,1732390598359 2024-11-23T19:36:38,881 WARN [RS:0;387b213c044a:34323 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T19:36:38,882 INFO [RS:0;387b213c044a:34323 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:36:38,882 DEBUG [RS:0;387b213c044a:34323 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/WALs/387b213c044a,34323,1732390598359 2024-11-23T19:36:38,882 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [387b213c044a,34323,1732390598359] 2024-11-23T19:36:38,886 INFO [RS:0;387b213c044a:34323 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T19:36:38,889 INFO [RS:0;387b213c044a:34323 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T19:36:38,889 INFO [RS:0;387b213c044a:34323 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T19:36:38,889 INFO [RS:0;387b213c044a:34323 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
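The registration above completes when the regionserver's ephemeral znode shows up under /hbase/rs and the master's RegionServerTracker picks up the resulting NodeChildrenChanged event. A minimal sketch of that ephemeral-node-plus-children-watch pattern with the plain Apache ZooKeeper client; the connect string and the /demo/rs path are placeholders (the parent path is assumed to already exist), not HBase's internal ZKWatcher/ZKUtil code:

```java
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralRegistrationSketch {
    public static void main(String[] args) throws Exception {
        // Watcher that logs events, the way a tracker reacts to membership changes.
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("event " + event.getType() + " on " + event.getPath());

        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher);

        // Ephemeral node: disappears automatically when this session ends.
        // Assumes /demo/rs already exists.
        zk.create("/demo/rs/server1", new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        // Register interest in the children of /demo/rs; the watcher above
        // fires once with NodeChildrenChanged when membership changes.
        zk.getChildren("/demo/rs", true);
    }
}
```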
2024-11-23T19:36:38,889 INFO [RS:0;387b213c044a:34323 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T19:36:38,890 INFO [RS:0;387b213c044a:34323 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T19:36:38,891 INFO [RS:0;387b213c044a:34323 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:38,891 DEBUG [RS:0;387b213c044a:34323 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:38,891 DEBUG [RS:0;387b213c044a:34323 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:38,891 DEBUG [RS:0;387b213c044a:34323 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:38,891 DEBUG [RS:0;387b213c044a:34323 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:38,891 DEBUG [RS:0;387b213c044a:34323 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:38,891 DEBUG [RS:0;387b213c044a:34323 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/387b213c044a:0, corePoolSize=2, maxPoolSize=2 2024-11-23T19:36:38,891 DEBUG [RS:0;387b213c044a:34323 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:38,891 DEBUG [RS:0;387b213c044a:34323 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:38,891 DEBUG [RS:0;387b213c044a:34323 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:38,891 DEBUG [RS:0;387b213c044a:34323 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:38,891 DEBUG [RS:0;387b213c044a:34323 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:38,892 DEBUG [RS:0;387b213c044a:34323 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:38,892 DEBUG [RS:0;387b213c044a:34323 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/387b213c044a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T19:36:38,892 DEBUG [RS:0;387b213c044a:34323 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T19:36:38,895 INFO [RS:0;387b213c044a:34323 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
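Each ExecutorService line above declares a bounded worker pool per operation type (RS_OPEN_REGION at corePoolSize=1/maxPoolSize=1, RS_LOG_REPLAY_OPS at 2/2, and so on). In plain java.util.concurrent terms, a named pool with those core/max bounds and core-thread timeout looks roughly like this (this is not HBase's own executor.ExecutorService wrapper):

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class OperationPoolSketch {
    // Pool loosely analogous to the per-operation executors above.
    // Note: with an unbounded work queue the pool only ever runs coreSize
    // threads; maxSize only matters once a bounded queue fills up.
    public static ThreadPoolExecutor newPool(String name, int coreSize, int maxSize) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            coreSize, maxSize, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
            runnable -> new Thread(runnable, name + "-worker"));
        pool.allowCoreThreadTimeOut(true);
        return pool;
    }
}
```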
2024-11-23T19:36:38,896 INFO [RS:0;387b213c044a:34323 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:38,896 INFO [RS:0;387b213c044a:34323 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:38,896 INFO [RS:0;387b213c044a:34323 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:38,896 INFO [RS:0;387b213c044a:34323 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:38,896 INFO [RS:0;387b213c044a:34323 {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,34323,1732390598359-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T19:36:38,910 INFO [RS:0;387b213c044a:34323 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T19:36:38,910 INFO [RS:0;387b213c044a:34323 {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,34323,1732390598359-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:38,911 INFO [RS:0;387b213c044a:34323 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:38,911 INFO [RS:0;387b213c044a:34323 {}] regionserver.Replication(171): 387b213c044a,34323,1732390598359 started 2024-11-23T19:36:38,925 INFO [RS:0;387b213c044a:34323 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:38,925 INFO [RS:0;387b213c044a:34323 {}] regionserver.HRegionServer(1482): Serving as 387b213c044a,34323,1732390598359, RpcServer on 387b213c044a/172.17.0.3:34323, sessionid=0x1016932b1b60001 2024-11-23T19:36:38,925 DEBUG [RS:0;387b213c044a:34323 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T19:36:38,925 DEBUG [RS:0;387b213c044a:34323 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 387b213c044a,34323,1732390598359 2024-11-23T19:36:38,925 DEBUG [RS:0;387b213c044a:34323 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '387b213c044a,34323,1732390598359' 2024-11-23T19:36:38,925 DEBUG [RS:0;387b213c044a:34323 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T19:36:38,926 DEBUG [RS:0;387b213c044a:34323 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T19:36:38,927 DEBUG [RS:0;387b213c044a:34323 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T19:36:38,927 DEBUG [RS:0;387b213c044a:34323 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T19:36:38,927 DEBUG [RS:0;387b213c044a:34323 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 387b213c044a,34323,1732390598359 2024-11-23T19:36:38,927 DEBUG [RS:0;387b213c044a:34323 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '387b213c044a,34323,1732390598359' 2024-11-23T19:36:38,927 DEBUG [RS:0;387b213c044a:34323 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T19:36:38,928 DEBUG 
[RS:0;387b213c044a:34323 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T19:36:38,928 DEBUG [RS:0;387b213c044a:34323 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T19:36:38,928 INFO [RS:0;387b213c044a:34323 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T19:36:38,928 INFO [RS:0;387b213c044a:34323 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T19:36:38,988 WARN [387b213c044a:37181 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-23T19:36:39,031 INFO [RS:0;387b213c044a:34323 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C34323%2C1732390598359, suffix=, logDir=hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/WALs/387b213c044a,34323,1732390598359, archiveDir=hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/oldWALs, maxLogs=32 2024-11-23T19:36:39,033 INFO [RS:0;387b213c044a:34323 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C34323%2C1732390598359.1732390599032 2024-11-23T19:36:39,039 INFO [RS:0;387b213c044a:34323 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/WALs/387b213c044a,34323,1732390598359/387b213c044a%2C34323%2C1732390598359.1732390599032 2024-11-23T19:36:39,040 DEBUG [RS:0;387b213c044a:34323 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38935:38935),(127.0.0.1/127.0.0.1:33985:33985)] 2024-11-23T19:36:39,239 DEBUG [387b213c044a:37181 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-23T19:36:39,241 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=387b213c044a,34323,1732390598359 2024-11-23T19:36:39,246 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 387b213c044a,34323,1732390598359, state=OPENING 2024-11-23T19:36:39,299 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T19:36:39,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:39,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:39,313 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:36:39,313 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:36:39,313 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T19:36:39,313 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=387b213c044a,34323,1732390598359}] 2024-11-23T19:36:39,468 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T19:36:39,472 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45373, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T19:36:39,478 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T19:36:39,479 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:36:39,481 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C34323%2C1732390598359.meta, suffix=.meta, logDir=hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/WALs/387b213c044a,34323,1732390598359, archiveDir=hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/oldWALs, maxLogs=32 2024-11-23T19:36:39,484 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C34323%2C1732390598359.meta.1732390599484.meta 2024-11-23T19:36:39,491 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/WALs/387b213c044a,34323,1732390598359/387b213c044a%2C34323%2C1732390598359.meta.1732390599484.meta 2024-11-23T19:36:39,496 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38935:38935),(127.0.0.1/127.0.0.1:33985:33985)] 2024-11-23T19:36:39,499 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T19:36:39,499 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T19:36:39,499 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T19:36:39,499 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
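The meta open above loads the MultiRowMutationEndpoint coprocessor that the descriptor declared earlier via its coprocessor$1 attribute. For a user table the same declaration goes through the public builder API; a minimal sketch, with the 'demo' table and 'info' family made up for illustration:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorDeclarationSketch {
    public static TableDescriptor build() throws IOException {
        // Declares a coprocessor by fully-qualified class name; the regionserver
        // loads it when regions of this table are opened, as in the log above.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
    }
}
```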
2024-11-23T19:36:39,500 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T19:36:39,500 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:36:39,500 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T19:36:39,500 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T19:36:39,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T19:36:39,503 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T19:36:39,504 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:39,504 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:36:39,504 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T19:36:39,505 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T19:36:39,505 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:39,506 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:36:39,506 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T19:36:39,508 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T19:36:39,508 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:39,508 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:36:39,509 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T19:36:39,510 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T19:36:39,510 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:39,510 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
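The CompactionConfiguration lines repeated for each meta column family (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, ExploringCompactionPolicy) are the per-store view of site-wide settings. A small sketch of naming those knobs on a Configuration; the property keys are the standard store-compaction ones from the HBase reference guide, and the values simply echo the defaults reported above:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Mirror the values reported by CompactionConfiguration above
        // (these are also the defaults; shown here only to name the knobs).
        conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
        return conf;
    }
}
```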
2024-11-23T19:36:39,511 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T19:36:39,512 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/data/hbase/meta/1588230740 2024-11-23T19:36:39,513 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/data/hbase/meta/1588230740 2024-11-23T19:36:39,515 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T19:36:39,515 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T19:36:39,516 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T19:36:39,518 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T19:36:39,519 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=720026, jitterRate=-0.08444012701511383}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T19:36:39,519 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T19:36:39,520 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732390599500Writing region info on filesystem at 1732390599500Initializing all the Stores at 1732390599502 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390599502Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390599502Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390599502Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390599502Cleaning up temporary data from old regions at 1732390599515 (+13 ms)Running coprocessor post-open hooks at 1732390599519 (+4 ms)Region opened successfully at 1732390599520 (+1 ms) 2024-11-23T19:36:39,522 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732390599467 2024-11-23T19:36:39,525 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T19:36:39,525 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T19:36:39,527 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=387b213c044a,34323,1732390598359 2024-11-23T19:36:39,528 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 387b213c044a,34323,1732390598359, state=OPEN 2024-11-23T19:36:39,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T19:36:39,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T19:36:39,668 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=387b213c044a,34323,1732390598359 2024-11-23T19:36:39,668 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:36:39,668 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:36:39,672 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T19:36:39,673 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=387b213c044a,34323,1732390598359 in 355 msec 2024-11-23T19:36:39,676 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T19:36:39,676 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 838 msec 2024-11-23T19:36:39,677 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:36:39,677 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-23T19:36:39,679 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T19:36:39,679 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=387b213c044a,34323,1732390598359, seqNum=-1] 2024-11-23T19:36:39,680 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T19:36:39,681 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36637, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T19:36:39,690 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 918 msec 2024-11-23T19:36:39,691 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732390599691, completionTime=-1 2024-11-23T19:36:39,691 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-23T19:36:39,691 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-23T19:36:39,694 INFO [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-23T19:36:39,694 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732390659694 2024-11-23T19:36:39,694 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732390719694 2024-11-23T19:36:39,694 INFO [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-11-23T19:36:39,695 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,37181,1732390598172-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:39,695 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,37181,1732390598172-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:39,695 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,37181,1732390598172-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:39,695 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-387b213c044a:37181, period=300000, unit=MILLISECONDS is enabled. 
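The InitMetaProcedure step above ("Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces") runs inside the master, but the same namespace DDL is available to clients. A minimal sketch, not taken from this test: the class name CreateNamespaceSketch and the namespace "test_ns" are invented for illustration, and it assumes the Admin#createNamespace / Admin#listNamespaceDescriptors calls of recent HBase client APIs.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateNamespaceSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // 'default' and 'hbase' are built in and created by InitMetaProcedure;
      // a user namespace goes through the same Admin call.
      admin.createNamespace(NamespaceDescriptor.create("test_ns").build());
      // List what exists; against the cluster above this should show
      // 'default' and 'hbase' alongside the new namespace.
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
    }
  }
}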
2024-11-23T19:36:39,695 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:39,695 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:39,699 DEBUG [master/387b213c044a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T19:36:39,702 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.241sec 2024-11-23T19:36:39,702 INFO [master/387b213c044a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T19:36:39,702 INFO [master/387b213c044a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T19:36:39,702 INFO [master/387b213c044a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T19:36:39,702 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-23T19:36:39,702 INFO [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T19:36:39,702 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,37181,1732390598172-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T19:36:39,702 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,37181,1732390598172-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T19:36:39,705 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-23T19:36:39,705 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T19:36:39,705 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,37181,1732390598172-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
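The "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." lines above are emitted by ChoreService as the master registers its periodic tasks (ClusterStatusChore, BalancerChore, CatalogJanitor, HbckChore, and so on). A rough sketch of defining and scheduling a chore, assuming the internal ScheduledChore(name, stopper, period, initialDelay, unit) constructor and ChoreService#scheduleChore; "DemoChore" and the anonymous Stoppable are invented for illustration.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws Exception {
    // Minimal stopper; real servers pass themselves so chores stop on shutdown.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo");
    // Periodic task; scheduling it should log an "... is enabled." line
    // like the ones recorded above.
    ScheduledChore chore =
        new ScheduledChore("DemoChore", stopper, 1, 0, TimeUnit.SECONDS) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };
    service.scheduleChore(chore);
    Thread.sleep(3000);
    service.shutdown();
  }
}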
2024-11-23T19:36:39,718 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6512930c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:36:39,718 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 387b213c044a,37181,-1 for getting cluster id 2024-11-23T19:36:39,718 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T19:36:39,720 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e5039eea-e9ea-4763-9eaa-3ba3380d7e30' 2024-11-23T19:36:39,721 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T19:36:39,721 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e5039eea-e9ea-4763-9eaa-3ba3380d7e30" 2024-11-23T19:36:39,721 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a2976b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:36:39,721 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [387b213c044a,37181,-1] 2024-11-23T19:36:39,722 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T19:36:39,724 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:36:39,726 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42652, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T19:36:39,727 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bd69191, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:36:39,727 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T19:36:39,729 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=387b213c044a,34323,1732390598359, seqNum=-1] 2024-11-23T19:36:39,729 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T19:36:39,731 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52818, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T19:36:39,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=387b213c044a,37181,1732390598172 2024-11-23T19:36:39,734 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:36:39,738 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-23T19:36:39,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-23T19:36:39,738 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-23T19:36:39,738 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:36:39,738 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:36:39,738 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:36:39,738 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T19:36:39,739 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T19:36:39,739 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1385371268, stopped=false 2024-11-23T19:36:39,739 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=387b213c044a,37181,1732390598172 2024-11-23T19:36:39,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T19:36:39,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T19:36:39,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:39,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:39,762 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T19:36:39,762 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
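The call stack above is the tearDown path of TestLogRolling.testLogRollOnDatanodeDeath: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and then asks the master to shut the cluster down, producing the "Shutting down minicluster" / "Cluster shutdown requested of master" records. A minimal JUnit 4 sketch of that lifecycle; the class name MiniClusterLifecycleSketch is invented, and it assumes the no-argument startMiniCluster() overload rather than the option-based one used later in this log.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Starts ZooKeeper, HDFS and HBase in-process, as at the top of this log.
    testUtil.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Drives the sequence recorded above: connections are closed, the master
    // stops the region server, regions are flushed and closed, WALs are
    // archived, and finally the DataNodes and MiniZK cluster are stopped.
    testUtil.shutdownMiniCluster();
  }

  @Test
  public void smoke() throws Exception {
    // Cluster-level assertions would go here.
  }
}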
2024-11-23T19:36:39,762 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:36:39,762 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:36:39,763 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:36:39,763 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '387b213c044a,34323,1732390598359' ***** 2024-11-23T19:36:39,763 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T19:36:39,763 INFO [RS:0;387b213c044a:34323 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T19:36:39,763 INFO [RS:0;387b213c044a:34323 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T19:36:39,763 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T19:36:39,763 INFO [RS:0;387b213c044a:34323 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T19:36:39,763 INFO [RS:0;387b213c044a:34323 {}] regionserver.HRegionServer(959): stopping server 387b213c044a,34323,1732390598359 2024-11-23T19:36:39,763 INFO [RS:0;387b213c044a:34323 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T19:36:39,763 INFO [RS:0;387b213c044a:34323 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;387b213c044a:34323. 2024-11-23T19:36:39,764 DEBUG [RS:0;387b213c044a:34323 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:36:39,764 DEBUG [RS:0;387b213c044a:34323 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:36:39,764 INFO [RS:0;387b213c044a:34323 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T19:36:39,764 INFO [RS:0;387b213c044a:34323 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-11-23T19:36:39,764 INFO [RS:0;387b213c044a:34323 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T19:36:39,764 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:36:39,764 INFO [RS:0;387b213c044a:34323 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-23T19:36:39,764 INFO [RS:0;387b213c044a:34323 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-23T19:36:39,764 DEBUG [RS:0;387b213c044a:34323 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-23T19:36:39,764 DEBUG [RS:0;387b213c044a:34323 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-23T19:36:39,765 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T19:36:39,765 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T19:36:39,765 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T19:36:39,765 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T19:36:39,765 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T19:36:39,765 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-23T19:36:39,783 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/data/hbase/meta/1588230740/.tmp/ns/96c04ee43d3e479e954071f588c328a9 is 43, key is default/ns:d/1732390599682/Put/seqid=0 2024-11-23T19:36:39,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39887 is added to blk_1073741835_1011 (size=5153) 2024-11-23T19:36:39,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44095 is added to blk_1073741835_1011 (size=5153) 2024-11-23T19:36:39,789 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/data/hbase/meta/1588230740/.tmp/ns/96c04ee43d3e479e954071f588c328a9 2024-11-23T19:36:39,798 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/data/hbase/meta/1588230740/.tmp/ns/96c04ee43d3e479e954071f588c328a9 as hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/data/hbase/meta/1588230740/ns/96c04ee43d3e479e954071f588c328a9 
2024-11-23T19:36:39,807 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/data/hbase/meta/1588230740/ns/96c04ee43d3e479e954071f588c328a9, entries=2, sequenceid=6, filesize=5.0 K 2024-11-23T19:36:39,809 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 44ms, sequenceid=6, compaction requested=false 2024-11-23T19:36:39,809 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-23T19:36:39,815 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T19:36:39,816 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T19:36:39,817 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T19:36:39,817 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732390599764Running coprocessor pre-close hooks at 1732390599764Disabling compacts and flushes for region at 1732390599764Disabling writes for close at 1732390599765 (+1 ms)Obtaining lock to block concurrent updates at 1732390599765Preparing flush snapshotting stores in 1588230740 at 1732390599765Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732390599765Flushing stores of hbase:meta,,1.1588230740 at 1732390599766 (+1 ms)Flushing 1588230740/ns: creating writer at 1732390599766Flushing 1588230740/ns: appending metadata at 1732390599782 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732390599782Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@356aed05: reopening flushed file at 1732390599797 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 44ms, sequenceid=6, compaction requested=false at 1732390599809 (+12 ms)Writing region close event to WAL at 1732390599811 (+2 ms)Running coprocessor post-close hooks at 1732390599816 (+5 ms)Closed at 1732390599816 2024-11-23T19:36:39,817 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T19:36:39,908 INFO [regionserver/387b213c044a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-23T19:36:39,908 INFO [regionserver/387b213c044a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-23T19:36:39,965 INFO [RS:0;387b213c044a:34323 {}] regionserver.HRegionServer(976): stopping server 387b213c044a,34323,1732390598359; all regions closed. 
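Closing hbase:meta above forces a flush of its memstore: the pending 'ns' edits are written to a .tmp file, committed into the ns store as a 5.0 K HFile, and a recovered.edits/9.seqid marker records the new max sequence id before the region closes. The same flush path can also be driven explicitly from a client. A small sketch assuming Admin#flush(TableName); the class name FlushSketch is invented.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // Asks the region server to write the table's memstores out as HFiles,
      // the same DefaultStoreFlusher / "Committing .tmp/... as ..." path that
      // the region close above runs implicitly.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}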
2024-11-23T19:36:39,965 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:39,966 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:39,966 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:39,966 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:39,966 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:39,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44095 is added to blk_1073741834_1010 (size=1152) 2024-11-23T19:36:39,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39887 is added to blk_1073741834_1010 (size=1152) 2024-11-23T19:36:39,975 DEBUG [RS:0;387b213c044a:34323 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/oldWALs 2024-11-23T19:36:39,975 INFO [RS:0;387b213c044a:34323 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 387b213c044a%2C34323%2C1732390598359.meta:.meta(num 1732390599484) 2024-11-23T19:36:39,976 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:39,976 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:39,976 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:39,976 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:39,976 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:39,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44095 is added to blk_1073741833_1009 (size=93) 2024-11-23T19:36:39,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39887 is added to blk_1073741833_1009 (size=93) 2024-11-23T19:36:39,981 DEBUG [RS:0;387b213c044a:34323 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/oldWALs 2024-11-23T19:36:39,981 INFO [RS:0;387b213c044a:34323 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 387b213c044a%2C34323%2C1732390598359:(num 1732390599032) 2024-11-23T19:36:39,981 DEBUG [RS:0;387b213c044a:34323 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:36:39,981 INFO [RS:0;387b213c044a:34323 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T19:36:39,981 INFO [RS:0;387b213c044a:34323 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T19:36:39,982 INFO [RS:0;387b213c044a:34323 {}] hbase.ChoreService(370): Chore service for: regionserver/387b213c044a:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T19:36:39,982 INFO [RS:0;387b213c044a:34323 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T19:36:39,982 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
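The two "Moved 1 WAL file(s) to .../oldWALs" / "Closed WAL: FSHLog ..." pairs above show the region server closing its meta WAL and its default WAL and archiving them during shutdown. Outside of shutdown, a roll with the same archive step can be requested per server, which is what a log-rolling test exercises. A sketch assuming Admin#rollWALWriter(ServerName) and Admin#getClusterMetrics(); the class name RollWalSketch is invented.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // Ask every live region server to close its current WAL and open a new
      // one; the closed file is later archived to oldWALs as in the records above.
      for (ServerName server :
          admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
        admin.rollWALWriter(server);
      }
    }
  }
}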
2024-11-23T19:36:39,982 INFO [RS:0;387b213c044a:34323 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34323 2024-11-23T19:36:39,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T19:36:39,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/387b213c044a,34323,1732390598359 2024-11-23T19:36:39,996 INFO [RS:0;387b213c044a:34323 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T19:36:40,007 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [387b213c044a,34323,1732390598359] 2024-11-23T19:36:40,017 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/387b213c044a,34323,1732390598359 already deleted, retry=false 2024-11-23T19:36:40,017 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 387b213c044a,34323,1732390598359 expired; onlineServers=0 2024-11-23T19:36:40,018 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '387b213c044a,37181,1732390598172' ***** 2024-11-23T19:36:40,018 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T19:36:40,018 INFO [M:0;387b213c044a:37181 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T19:36:40,018 INFO [M:0;387b213c044a:37181 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T19:36:40,018 DEBUG [M:0;387b213c044a:37181 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T19:36:40,018 DEBUG [M:0;387b213c044a:37181 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T19:36:40,018 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-23T19:36:40,018 DEBUG [master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390598779 {}] cleaner.HFileCleaner(306): Exit Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390598779,5,FailOnTimeoutGroup] 2024-11-23T19:36:40,018 DEBUG [master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390598779 {}] cleaner.HFileCleaner(306): Exit Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390598779,5,FailOnTimeoutGroup] 2024-11-23T19:36:40,019 INFO [M:0;387b213c044a:37181 {}] hbase.ChoreService(370): Chore service for: master/387b213c044a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-23T19:36:40,019 INFO [M:0;387b213c044a:37181 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T19:36:40,019 DEBUG [M:0;387b213c044a:37181 {}] master.HMaster(1795): Stopping service threads 2024-11-23T19:36:40,020 INFO [M:0;387b213c044a:37181 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T19:36:40,020 INFO [M:0;387b213c044a:37181 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T19:36:40,020 INFO [M:0;387b213c044a:37181 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T19:36:40,020 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-23T19:36:40,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T19:36:40,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:40,028 DEBUG [M:0;387b213c044a:37181 {}] zookeeper.ZKUtil(347): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T19:36:40,029 WARN [M:0;387b213c044a:37181 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T19:36:40,030 INFO [M:0;387b213c044a:37181 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/.lastflushedseqids 2024-11-23T19:36:40,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39887 is added to blk_1073741836_1012 (size=99) 2024-11-23T19:36:40,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44095 is added to blk_1073741836_1012 (size=99) 2024-11-23T19:36:40,040 INFO [M:0;387b213c044a:37181 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-23T19:36:40,040 INFO [M:0;387b213c044a:37181 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T19:36:40,040 DEBUG [M:0;387b213c044a:37181 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T19:36:40,040 INFO [M:0;387b213c044a:37181 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:36:40,040 DEBUG [M:0;387b213c044a:37181 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:36:40,040 DEBUG [M:0;387b213c044a:37181 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T19:36:40,040 DEBUG [M:0;387b213c044a:37181 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:36:40,041 INFO [M:0;387b213c044a:37181 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-23T19:36:40,060 DEBUG [M:0;387b213c044a:37181 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4bfdbc85a0d74f779b9279ad192c7994 is 82, key is hbase:meta,,1/info:regioninfo/1732390599526/Put/seqid=0 2024-11-23T19:36:40,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39887 is added to blk_1073741837_1013 (size=5672) 2024-11-23T19:36:40,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44095 is added to blk_1073741837_1013 (size=5672) 2024-11-23T19:36:40,066 INFO [M:0;387b213c044a:37181 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4bfdbc85a0d74f779b9279ad192c7994 2024-11-23T19:36:40,087 DEBUG [M:0;387b213c044a:37181 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5750c05728ee4ae3a41ea961934d1ce7 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732390599689/Put/seqid=0 2024-11-23T19:36:40,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39887 is added to blk_1073741838_1014 (size=5275) 2024-11-23T19:36:40,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44095 is added to blk_1073741838_1014 (size=5275) 2024-11-23T19:36:40,093 INFO [M:0;387b213c044a:37181 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5750c05728ee4ae3a41ea961934d1ce7 2024-11-23T19:36:40,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:36:40,107 INFO [RS:0;387b213c044a:34323 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T19:36:40,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34323-0x1016932b1b60001, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-11-23T19:36:40,107 INFO [RS:0;387b213c044a:34323 {}] regionserver.HRegionServer(1031): Exiting; stopping=387b213c044a,34323,1732390598359; zookeeper connection closed. 2024-11-23T19:36:40,108 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@ffd68d2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@ffd68d2 2024-11-23T19:36:40,108 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-23T19:36:40,115 DEBUG [M:0;387b213c044a:37181 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/244f50f7cf724007a704826e38c20c5d is 69, key is 387b213c044a,34323,1732390598359/rs:state/1732390598866/Put/seqid=0 2024-11-23T19:36:40,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39887 is added to blk_1073741839_1015 (size=5156) 2024-11-23T19:36:40,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44095 is added to blk_1073741839_1015 (size=5156) 2024-11-23T19:36:40,120 INFO [M:0;387b213c044a:37181 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/244f50f7cf724007a704826e38c20c5d 2024-11-23T19:36:40,141 DEBUG [M:0;387b213c044a:37181 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/52ce1ac1481d4aa88f572420bd7e9b63 is 52, key is load_balancer_on/state:d/1732390599736/Put/seqid=0 2024-11-23T19:36:40,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39887 is added to blk_1073741840_1016 (size=5056) 2024-11-23T19:36:40,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44095 is added to blk_1073741840_1016 (size=5056) 2024-11-23T19:36:40,147 INFO [M:0;387b213c044a:37181 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/52ce1ac1481d4aa88f572420bd7e9b63 2024-11-23T19:36:40,154 DEBUG [M:0;387b213c044a:37181 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4bfdbc85a0d74f779b9279ad192c7994 as hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4bfdbc85a0d74f779b9279ad192c7994 2024-11-23T19:36:40,161 INFO [M:0;387b213c044a:37181 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4bfdbc85a0d74f779b9279ad192c7994, entries=8, sequenceid=29, filesize=5.5 K 2024-11-23T19:36:40,163 DEBUG 
[M:0;387b213c044a:37181 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5750c05728ee4ae3a41ea961934d1ce7 as hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5750c05728ee4ae3a41ea961934d1ce7 2024-11-23T19:36:40,169 INFO [M:0;387b213c044a:37181 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5750c05728ee4ae3a41ea961934d1ce7, entries=3, sequenceid=29, filesize=5.2 K 2024-11-23T19:36:40,170 DEBUG [M:0;387b213c044a:37181 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/244f50f7cf724007a704826e38c20c5d as hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/244f50f7cf724007a704826e38c20c5d 2024-11-23T19:36:40,177 INFO [M:0;387b213c044a:37181 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/244f50f7cf724007a704826e38c20c5d, entries=1, sequenceid=29, filesize=5.0 K 2024-11-23T19:36:40,179 DEBUG [M:0;387b213c044a:37181 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/52ce1ac1481d4aa88f572420bd7e9b63 as hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/52ce1ac1481d4aa88f572420bd7e9b63 2024-11-23T19:36:40,185 INFO [M:0;387b213c044a:37181 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36231/user/jenkins/test-data/5dc51d19-b062-2101-8d33-8f45cb9c52d0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/52ce1ac1481d4aa88f572420bd7e9b63, entries=1, sequenceid=29, filesize=4.9 K 2024-11-23T19:36:40,186 INFO [M:0;387b213c044a:37181 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=29, compaction requested=false 2024-11-23T19:36:40,188 INFO [M:0;387b213c044a:37181 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:36:40,188 DEBUG [M:0;387b213c044a:37181 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732390600040Disabling compacts and flushes for region at 1732390600040Disabling writes for close at 1732390600040Obtaining lock to block concurrent updates at 1732390600041 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732390600041Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732390600041Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732390600042 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732390600042Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732390600059 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732390600059Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732390600072 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732390600087 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732390600087Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732390600099 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732390600114 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732390600114Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732390600126 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732390600141 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732390600141Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75cd2e12: reopening flushed file at 1732390600153 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@62622101: reopening flushed file at 1732390600161 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57b46afb: reopening flushed file at 1732390600169 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@199ba85b: reopening flushed file at 1732390600177 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=29, compaction requested=false at 1732390600186 (+9 ms)Writing region close event to WAL at 1732390600188 (+2 ms)Closed at 1732390600188 2024-11-23T19:36:40,188 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:40,188 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:40,189 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:40,189 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:40,189 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:40,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44095 is added to blk_1073741830_1006 (size=10311) 2024-11-23T19:36:40,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39887 is added to blk_1073741830_1006 (size=10311) 2024-11-23T19:36:40,192 INFO [M:0;387b213c044a:37181 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-23T19:36:40,192 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T19:36:40,192 INFO [M:0;387b213c044a:37181 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37181 2024-11-23T19:36:40,192 INFO [M:0;387b213c044a:37181 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T19:36:40,471 INFO [M:0;387b213c044a:37181 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T19:36:40,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:36:40,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37181-0x1016932b1b60000, quorum=127.0.0.1:53621, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:36:40,507 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@da5059a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:36:40,508 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2220be00{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:36:40,508 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:36:40,509 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a742c1f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:36:40,509 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6082dc4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/hadoop.log.dir/,STOPPED} 2024-11-23T19:36:40,512 WARN [BP-2131376814-172.17.0.3-1732390595240 heartbeating to localhost/127.0.0.1:36231 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:36:40,512 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T19:36:40,512 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:36:40,512 WARN [BP-2131376814-172.17.0.3-1732390595240 heartbeating to localhost/127.0.0.1:36231 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2131376814-172.17.0.3-1732390595240 (Datanode Uuid 53432c00-be6e-4f4f-9ab5-dd67ff79e9fa) service to localhost/127.0.0.1:36231 2024-11-23T19:36:40,514 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/cluster_a98886a6-31e9-1aa1-9e01-0ea971b30812/data/data3/current/BP-2131376814-172.17.0.3-1732390595240 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:36:40,514 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/cluster_a98886a6-31e9-1aa1-9e01-0ea971b30812/data/data4/current/BP-2131376814-172.17.0.3-1732390595240 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:36:40,514 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:36:40,518 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4595827f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:36:40,519 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3b01355c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:36:40,519 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:36:40,519 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b58749b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:36:40,519 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61783b0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/hadoop.log.dir/,STOPPED} 2024-11-23T19:36:40,521 WARN [BP-2131376814-172.17.0.3-1732390595240 heartbeating to localhost/127.0.0.1:36231 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:36:40,522 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
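In the records that follow, the remaining DataNode finishes shutting down, the NameNode web application and the MiniZK cluster are stopped ("Minicluster is down"), and the test immediately restarts the minicluster with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}. A sketch of such a restart, assuming StartMiniClusterOption.builder() exposes setters matching the fields printed in its toString and that HBaseTestingUtil#startMiniCluster accepts the option; the class name RestartMiniClusterSketch is invented.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class RestartMiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil testUtil = new HBaseTestingUtil();
    // Mirrors the option printed in the restart logged below
    // (builder setter names are assumed from the toString fields).
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    testUtil.startMiniCluster(option);
    try {
      // Test body: with two DataNodes, one can be killed to exercise
      // WAL rolling on DataNode death, as this test's name suggests.
    } finally {
      testUtil.shutdownMiniCluster();
    }
  }
}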
2024-11-23T19:36:40,522 WARN [BP-2131376814-172.17.0.3-1732390595240 heartbeating to localhost/127.0.0.1:36231 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2131376814-172.17.0.3-1732390595240 (Datanode Uuid bcecba81-64f0-420b-94c4-a2cab477d4fb) service to localhost/127.0.0.1:36231 2024-11-23T19:36:40,522 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:36:40,522 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/cluster_a98886a6-31e9-1aa1-9e01-0ea971b30812/data/data1/current/BP-2131376814-172.17.0.3-1732390595240 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:36:40,522 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/cluster_a98886a6-31e9-1aa1-9e01-0ea971b30812/data/data2/current/BP-2131376814-172.17.0.3-1732390595240 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:36:40,522 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:36:40,527 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55cb1221{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T19:36:40,528 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@542ee468{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:36:40,528 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:36:40,528 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@737d6115{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:36:40,528 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54b8bf96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/hadoop.log.dir/,STOPPED} 2024-11-23T19:36:40,533 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-23T19:36:40,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-23T19:36:40,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-23T19:36:40,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/hadoop.log.dir so I do NOT create it in target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55 2024-11-23T19:36:40,550 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/72731f8e-446c-1fa5-88d1-60896171d4e0/hadoop.tmp.dir so I do NOT create it in target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55 2024-11-23T19:36:40,550 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647, deleteOnExit=true 2024-11-23T19:36:40,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-23T19:36:40,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/test.cache.data in system properties and HBase conf 2024-11-23T19:36:40,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T19:36:40,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/hadoop.log.dir in system properties and HBase conf 2024-11-23T19:36:40,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T19:36:40,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T19:36:40,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-23T19:36:40,551 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-23T19:36:40,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T19:36:40,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T19:36:40,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T19:36:40,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T19:36:40,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T19:36:40,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T19:36:40,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T19:36:40,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T19:36:40,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T19:36:40,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/nfs.dump.dir in system properties and HBase conf 2024-11-23T19:36:40,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/java.io.tmpdir in system properties and HBase conf 2024-11-23T19:36:40,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T19:36:40,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T19:36:40,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T19:36:40,565 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T19:36:40,886 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:36:40,893 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:36:40,896 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:36:40,896 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:36:40,896 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T19:36:40,896 INFO [regionserver/387b213c044a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T19:36:40,897 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:36:40,897 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@475f8022{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:36:40,898 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1aa07d80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:36:40,995 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@94a50db{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/java.io.tmpdir/jetty-localhost-42301-hadoop-hdfs-3_4_1-tests_jar-_-any-5014027639743889671/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T19:36:40,996 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38184680{HTTP/1.1, (http/1.1)}{localhost:42301} 2024-11-23T19:36:40,996 INFO [Time-limited test {}] server.Server(415): Started @113527ms 2024-11-23T19:36:41,008 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T19:36:41,246 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:36:41,251 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:36:41,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:36:41,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:36:41,252 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T19:36:41,253 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68a89b56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:36:41,253 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2cb9bebc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:36:41,346 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d327fd2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/java.io.tmpdir/jetty-localhost-39421-hadoop-hdfs-3_4_1-tests_jar-_-any-16170593394387377320/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:36:41,347 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4c80aceb{HTTP/1.1, (http/1.1)}{localhost:39421} 2024-11-23T19:36:41,347 INFO [Time-limited test {}] server.Server(415): Started @113877ms 2024-11-23T19:36:41,348 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:36:41,375 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:36:41,379 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:36:41,380 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:36:41,380 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:36:41,380 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T19:36:41,381 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74fcfaad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:36:41,381 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f7f19bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:36:41,477 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@597807df{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/java.io.tmpdir/jetty-localhost-35395-hadoop-hdfs-3_4_1-tests_jar-_-any-16655443591051043787/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:36:41,477 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@401bd933{HTTP/1.1, (http/1.1)}{localhost:35395} 2024-11-23T19:36:41,477 INFO [Time-limited test {}] server.Server(415): Started @114008ms 2024-11-23T19:36:41,479 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:36:42,451 WARN [Thread-672 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data1/current/BP-1109934133-172.17.0.3-1732390600576/current, will proceed with Du for space computation calculation, 2024-11-23T19:36:42,451 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data2/current/BP-1109934133-172.17.0.3-1732390600576/current, will proceed with Du for space computation calculation, 2024-11-23T19:36:42,473 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T19:36:42,475 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x98e1b13c92b8cac4 with lease ID 0xb6b5517095c97df5: Processing first storage report for DS-ececda8f-d48e-479e-ab76-ea68bd433370 from datanode DatanodeRegistration(127.0.0.1:37185, datanodeUuid=ff45096b-2949-4c72-b1a7-5f9fed6e7090, infoPort=41917, infoSecurePort=0, ipcPort=35157, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576) 2024-11-23T19:36:42,475 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x98e1b13c92b8cac4 with lease ID 0xb6b5517095c97df5: from storage DS-ececda8f-d48e-479e-ab76-ea68bd433370 node DatanodeRegistration(127.0.0.1:37185, datanodeUuid=ff45096b-2949-4c72-b1a7-5f9fed6e7090, infoPort=41917, infoSecurePort=0, ipcPort=35157, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:36:42,475 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x98e1b13c92b8cac4 with lease ID 0xb6b5517095c97df5: Processing first storage report for DS-965ea196-4709-47c1-a5e1-df539d24fb8e from datanode DatanodeRegistration(127.0.0.1:37185, datanodeUuid=ff45096b-2949-4c72-b1a7-5f9fed6e7090, infoPort=41917, infoSecurePort=0, ipcPort=35157, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576) 2024-11-23T19:36:42,476 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x98e1b13c92b8cac4 with lease ID 0xb6b5517095c97df5: from storage DS-965ea196-4709-47c1-a5e1-df539d24fb8e node DatanodeRegistration(127.0.0.1:37185, datanodeUuid=ff45096b-2949-4c72-b1a7-5f9fed6e7090, infoPort=41917, infoSecurePort=0, ipcPort=35157, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:36:42,602 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data3/current/BP-1109934133-172.17.0.3-1732390600576/current, will proceed with Du for space computation calculation, 2024-11-23T19:36:42,602 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data4/current/BP-1109934133-172.17.0.3-1732390600576/current, will proceed with Du for space computation calculation, 2024-11-23T19:36:42,626 WARN [Thread-659 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T19:36:42,628 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb9b6580a68ac476f with lease ID 0xb6b5517095c97df6: Processing first storage report for DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e from datanode DatanodeRegistration(127.0.0.1:34927, datanodeUuid=7e48bb23-6279-4532-ae03-0fdf13f3b82f, infoPort=41595, infoSecurePort=0, ipcPort=34347, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576) 2024-11-23T19:36:42,628 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb9b6580a68ac476f with lease ID 0xb6b5517095c97df6: from storage DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e node DatanodeRegistration(127.0.0.1:34927, datanodeUuid=7e48bb23-6279-4532-ae03-0fdf13f3b82f, infoPort=41595, infoSecurePort=0, ipcPort=34347, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:36:42,628 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb9b6580a68ac476f with lease ID 0xb6b5517095c97df6: Processing first storage report for DS-3a7b9d5c-2092-41df-810b-85a1fbcbb2a7 from datanode DatanodeRegistration(127.0.0.1:34927, datanodeUuid=7e48bb23-6279-4532-ae03-0fdf13f3b82f, infoPort=41595, infoSecurePort=0, ipcPort=34347, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576) 2024-11-23T19:36:42,629 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb9b6580a68ac476f with lease ID 0xb6b5517095c97df6: from storage DS-3a7b9d5c-2092-41df-810b-85a1fbcbb2a7 node DatanodeRegistration(127.0.0.1:34927, datanodeUuid=7e48bb23-6279-4532-ae03-0fdf13f3b82f, infoPort=41595, infoSecurePort=0, ipcPort=34347, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:36:42,722 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55 2024-11-23T19:36:42,726 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/zookeeper_0, clientPort=56596, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T19:36:42,726 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56596 2024-11-23T19:36:42,727 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:36:42,728 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:36:42,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37185 is added to blk_1073741825_1001 (size=7) 2024-11-23T19:36:42,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34927 is added to blk_1073741825_1001 (size=7) 2024-11-23T19:36:42,739 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed with version=8 2024-11-23T19:36:42,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/hbase-staging 2024-11-23T19:36:42,742 INFO [Time-limited test {}] client.ConnectionUtils(128): master/387b213c044a:0 server-side Connection retries=45 2024-11-23T19:36:42,742 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:36:42,742 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T19:36:42,742 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T19:36:42,742 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:36:42,742 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T19:36:42,742 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-23T19:36:42,742 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T19:36:42,743 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35305 2024-11-23T19:36:42,745 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35305 connecting to ZooKeeper ensemble=127.0.0.1:56596 2024-11-23T19:36:42,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:353050x0, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T19:36:42,801 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35305-0x1016932c3900000 connected 2024-11-23T19:36:42,891 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:36:42,895 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:36:42,899 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:36:42,899 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed, hbase.cluster.distributed=false 2024-11-23T19:36:42,902 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T19:36:42,903 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35305 2024-11-23T19:36:42,903 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35305 2024-11-23T19:36:42,903 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35305 2024-11-23T19:36:42,904 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35305 2024-11-23T19:36:42,904 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35305 2024-11-23T19:36:42,920 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/387b213c044a:0 server-side Connection retries=45 2024-11-23T19:36:42,920 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:36:42,920 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T19:36:42,920 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T19:36:42,920 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:36:42,921 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T19:36:42,921 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T19:36:42,921 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T19:36:42,921 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44195 2024-11-23T19:36:42,923 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44195 connecting to ZooKeeper ensemble=127.0.0.1:56596 2024-11-23T19:36:42,924 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:36:42,926 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:36:42,940 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:441950x0, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T19:36:42,941 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44195-0x1016932c3900001 connected 2024-11-23T19:36:42,941 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:36:42,941 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T19:36:42,942 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T19:36:42,942 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T19:36:42,943 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T19:36:42,944 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44195 2024-11-23T19:36:42,944 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44195 2024-11-23T19:36:42,944 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44195 2024-11-23T19:36:42,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44195 2024-11-23T19:36:42,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44195 2024-11-23T19:36:42,958 DEBUG [M:0;387b213c044a:35305 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;387b213c044a:35305 2024-11-23T19:36:42,958 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/387b213c044a,35305,1732390602741 2024-11-23T19:36:42,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:36:42,964 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:36:42,965 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/387b213c044a,35305,1732390602741 2024-11-23T19:36:42,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:42,975 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T19:36:42,975 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:42,976 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T19:36:42,976 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/387b213c044a,35305,1732390602741 from backup master directory 2024-11-23T19:36:42,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/387b213c044a,35305,1732390602741 2024-11-23T19:36:42,986 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:36:42,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:36:42,986 WARN [master/387b213c044a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-23T19:36:42,986 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=387b213c044a,35305,1732390602741 2024-11-23T19:36:42,994 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/hbase.id] with ID: 4c999074-bcc7-48e1-be4b-fd986739057e 2024-11-23T19:36:42,994 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/.tmp/hbase.id 2024-11-23T19:36:43,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34927 is added to blk_1073741826_1002 (size=42) 2024-11-23T19:36:43,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37185 is added to blk_1073741826_1002 (size=42) 2024-11-23T19:36:43,001 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/.tmp/hbase.id]:[hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/hbase.id] 2024-11-23T19:36:43,017 INFO [master/387b213c044a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:36:43,017 INFO [master/387b213c044a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-23T19:36:43,019 INFO [master/387b213c044a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
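[Editor's note] The restart logged earlier in this run (hbase.HBaseTestingUtil(805): StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...}) is the test harness being asked for that exact topology. A minimal sketch of how such a request is typically written against the testing utility is below; it is not code from the test that produced this log, and the builder method names are assumed to mirror the option fields printed in the log line.

// Illustrative sketch only -- not taken from the test that produced this log.
// Assumes StartMiniClusterOption.builder() exposes methods named after the
// fields printed in the log (numMasters, numRegionServers, numDataNodes, numZkServers).
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // one HMaster, as logged
        .numRegionServers(1)  // one RegionServer
        .numDataNodes(2)      // two HDFS DataNodes
        .numZkServers(1)      // one MiniZooKeeperCluster node
        .build();
    util.startMiniCluster(option);  // produces the "STARTING DFS" / property-setting lines seen above
    try {
      // ... test body ...
    } finally {
      util.shutdownMiniCluster();   // produces the "Minicluster is down" line seen above
    }
  }
}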
2024-11-23T19:36:43,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:43,028 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:43,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34927 is added to blk_1073741827_1003 (size=196) 2024-11-23T19:36:43,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37185 is added to blk_1073741827_1003 (size=196) 2024-11-23T19:36:43,036 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T19:36:43,036 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-23T19:36:43,037 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:36:43,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34927 is added to blk_1073741828_1004 (size=1189) 2024-11-23T19:36:43,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37185 is added to blk_1073741828_1004 (size=1189) 2024-11-23T19:36:43,046 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store 2024-11-23T19:36:43,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37185 is added to blk_1073741829_1005 (size=34) 2024-11-23T19:36:43,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34927 is added to blk_1073741829_1005 (size=34) 2024-11-23T19:36:43,054 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:36:43,054 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T19:36:43,055 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:36:43,055 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:36:43,055 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T19:36:43,055 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:36:43,055 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
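[Editor's note] The descriptor dumped above for the local 'master:store' region is the standard HBase table/column-family descriptor format. As an illustration of how the 'info' family attributes shown there (VERSIONS => '3', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', BLOCKSIZE => 8192) map onto the public descriptor builders, a minimal sketch follows; it is not the MasterRegion code that produced the log line, and the table name used is purely hypothetical.

// Illustrative sketch only -- expresses the 'info' family attributes printed
// above via the public client API; this is not HBase's MasterRegion code.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  static TableDescriptor infoFamilyExample() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                     // VERSIONS => '3'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                                // BLOCKSIZE => '8192 B (8KB)'
        .build();
    // "example" is a hypothetical table name used only for this illustration.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .setColumnFamily(info)
        .build();
  }
}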
2024-11-23T19:36:43,055 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732390603054Disabling compacts and flushes for region at 1732390603054Disabling writes for close at 1732390603055 (+1 ms)Writing region close event to WAL at 1732390603055Closed at 1732390603055 2024-11-23T19:36:43,056 WARN [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/.initializing 2024-11-23T19:36:43,056 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/WALs/387b213c044a,35305,1732390602741 2024-11-23T19:36:43,059 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C35305%2C1732390602741, suffix=, logDir=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/WALs/387b213c044a,35305,1732390602741, archiveDir=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/oldWALs, maxLogs=10 2024-11-23T19:36:43,060 INFO [master/387b213c044a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C35305%2C1732390602741.1732390603059 2024-11-23T19:36:43,065 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/WALs/387b213c044a,35305,1732390602741/387b213c044a%2C35305%2C1732390602741.1732390603059 2024-11-23T19:36:43,066 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41595:41595),(127.0.0.1/127.0.0.1:41917:41917)] 2024-11-23T19:36:43,066 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T19:36:43,067 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:36:43,067 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:43,067 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:43,069 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:43,071 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T19:36:43,071 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:43,072 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:36:43,072 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:43,073 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T19:36:43,073 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:43,074 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:36:43,074 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:43,075 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T19:36:43,075 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:43,076 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:36:43,076 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:43,077 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T19:36:43,077 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:43,078 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:36:43,078 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:43,078 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:43,079 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:43,080 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:43,080 DEBUG [master/387b213c044a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:43,080 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T19:36:43,081 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:36:43,084 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T19:36:43,084 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=853276, jitterRate=0.08499729633331299}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T19:36:43,085 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732390603067Initializing all the Stores at 1732390603068 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390603068Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390603069 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390603069Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390603069Cleaning up temporary data from old regions at 1732390603080 (+11 ms)Region opened successfully at 1732390603085 (+5 ms) 2024-11-23T19:36:43,086 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T19:36:43,089 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c7bb0e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=387b213c044a/172.17.0.3:0 2024-11-23T19:36:43,090 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-23T19:36:43,091 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T19:36:43,091 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T19:36:43,091 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T19:36:43,092 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-23T19:36:43,092 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-23T19:36:43,092 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T19:36:43,094 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T19:36:43,095 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T19:36:43,101 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-23T19:36:43,102 INFO [master/387b213c044a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T19:36:43,102 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T19:36:43,112 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-23T19:36:43,112 INFO [master/387b213c044a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T19:36:43,113 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T19:36:43,122 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-23T19:36:43,124 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T19:36:43,133 DEBUG 
[master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T19:36:43,138 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T19:36:43,151 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T19:36:43,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T19:36:43,165 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T19:36:43,165 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:43,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:43,167 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=387b213c044a,35305,1732390602741, sessionid=0x1016932c3900000, setting cluster-up flag (Was=false) 2024-11-23T19:36:43,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:43,185 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:43,217 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T19:36:43,219 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=387b213c044a,35305,1732390602741 2024-11-23T19:36:43,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:43,238 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:43,270 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T19:36:43,271 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=387b213c044a,35305,1732390602741 2024-11-23T19:36:43,272 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-23T19:36:43,274 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-23T19:36:43,274 INFO [master/387b213c044a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-23T19:36:43,274 INFO [master/387b213c044a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-23T19:36:43,274 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 387b213c044a,35305,1732390602741 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T19:36:43,276 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:36:43,276 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:36:43,276 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:36:43,276 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:36:43,276 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/387b213c044a:0, corePoolSize=10, maxPoolSize=10 2024-11-23T19:36:43,276 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:43,276 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/387b213c044a:0, corePoolSize=2, maxPoolSize=2 2024-11-23T19:36:43,276 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/387b213c044a:0, corePoolSize=1, 
maxPoolSize=1 2024-11-23T19:36:43,277 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732390633277 2024-11-23T19:36:43,277 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T19:36:43,277 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T19:36:43,277 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T19:36:43,277 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T19:36:43,277 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T19:36:43,277 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T19:36:43,277 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:43,278 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:36:43,278 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T19:36:43,278 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T19:36:43,278 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-23T19:36:43,278 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T19:36:43,278 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T19:36:43,278 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T19:36:43,278 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390603278,5,FailOnTimeoutGroup] 2024-11-23T19:36:43,278 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390603278,5,FailOnTimeoutGroup] 2024-11-23T19:36:43,278 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:43,279 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T19:36:43,279 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:43,279 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:43,279 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:43,279 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T19:36:43,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37185 is added to blk_1073741831_1007 (size=1321) 2024-11-23T19:36:43,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34927 is added to blk_1073741831_1007 (size=1321) 2024-11-23T19:36:43,288 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-23T19:36:43,288 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed 2024-11-23T19:36:43,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37185 is added to blk_1073741832_1008 (size=32) 2024-11-23T19:36:43,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34927 is added to blk_1073741832_1008 (size=32) 2024-11-23T19:36:43,296 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:36:43,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T19:36:43,299 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T19:36:43,299 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:43,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:36:43,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T19:36:43,301 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T19:36:43,301 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:43,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:36:43,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T19:36:43,303 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T19:36:43,304 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:43,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:36:43,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T19:36:43,306 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T19:36:43,306 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:43,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:36:43,307 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T19:36:43,308 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740 2024-11-23T19:36:43,308 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740 2024-11-23T19:36:43,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T19:36:43,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T19:36:43,310 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
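The FlushLargeStoresPolicy entries in this run (32.0 M for master:store earlier, 16.0 M for hbase:meta just above) show the fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table descriptor: the per-family lower bound is taken as the region's memstore flush size divided by the number of column families (for master:store, flushSize=134217728, i.e. 128 MB, over 4 families gives 32 MB). The Java sketch below only restates that arithmetic and shows one way the property could be set explicitly on a table descriptor; the table name and value are illustrative, not taken from this test.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundSketch {
        // Fallback reported in the log when the table-level property is absent:
        // lower bound = memstore flush size / number of column families.
        static long fallbackLowerBound(long memstoreFlushSizeBytes, int numFamilies) {
            return memstoreFlushSizeBytes / numFamilies;
        }

        public static void main(String[] args) {
            // 128 MB flush size and 4 families -> 32 MB, matching the master:store entry.
            System.out.println(fallbackLowerBound(128L * 1024 * 1024, 4));

            // Hypothetical descriptor with the bound set explicitly (illustrative only).
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example_table"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                          String.valueOf(16L * 1024 * 1024))
                .build();
            System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
        }
    }

With the property set on the descriptor, the "No hbase.hregion.percolumnfamilyflush.size.lower.bound set ..." debug line would not appear and the configured value would be used instead of the derived one.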
2024-11-23T19:36:43,311 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T19:36:43,313 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T19:36:43,314 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=756579, jitterRate=-0.03796099126338959}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T19:36:43,314 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732390603296Initializing all the Stores at 1732390603297 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390603297Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390603297Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390603297Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390603297Cleaning up temporary data from old regions at 1732390603310 (+13 ms)Region opened successfully at 1732390603314 (+4 ms) 2024-11-23T19:36:43,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T19:36:43,315 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T19:36:43,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T19:36:43,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T19:36:43,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T19:36:43,315 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T19:36:43,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732390603314Disabling compacts and flushes for region at 1732390603314Disabling writes for close at 1732390603315 (+1 ms)Writing 
region close event to WAL at 1732390603315Closed at 1732390603315 2024-11-23T19:36:43,316 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:36:43,316 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-23T19:36:43,317 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T19:36:43,318 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T19:36:43,319 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T19:36:43,347 INFO [RS:0;387b213c044a:44195 {}] regionserver.HRegionServer(746): ClusterId : 4c999074-bcc7-48e1-be4b-fd986739057e 2024-11-23T19:36:43,347 DEBUG [RS:0;387b213c044a:44195 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T19:36:43,355 DEBUG [RS:0;387b213c044a:44195 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T19:36:43,355 DEBUG [RS:0;387b213c044a:44195 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T19:36:43,365 DEBUG [RS:0;387b213c044a:44195 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T19:36:43,366 DEBUG [RS:0;387b213c044a:44195 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48d15746, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=387b213c044a/172.17.0.3:0 2024-11-23T19:36:43,377 DEBUG [RS:0;387b213c044a:44195 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;387b213c044a:44195 2024-11-23T19:36:43,377 INFO [RS:0;387b213c044a:44195 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T19:36:43,377 INFO [RS:0;387b213c044a:44195 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T19:36:43,377 DEBUG [RS:0;387b213c044a:44195 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-23T19:36:43,378 INFO [RS:0;387b213c044a:44195 {}] regionserver.HRegionServer(2659): reportForDuty to master=387b213c044a,35305,1732390602741 with port=44195, startcode=1732390602920 2024-11-23T19:36:43,379 DEBUG [RS:0;387b213c044a:44195 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T19:36:43,381 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47691, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T19:36:43,381 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35305 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 387b213c044a,44195,1732390602920 2024-11-23T19:36:43,381 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35305 {}] master.ServerManager(517): Registering regionserver=387b213c044a,44195,1732390602920 2024-11-23T19:36:43,383 DEBUG [RS:0;387b213c044a:44195 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed 2024-11-23T19:36:43,383 DEBUG [RS:0;387b213c044a:44195 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39041 2024-11-23T19:36:43,383 DEBUG [RS:0;387b213c044a:44195 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T19:36:43,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T19:36:43,397 DEBUG [RS:0;387b213c044a:44195 {}] zookeeper.ZKUtil(111): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/387b213c044a,44195,1732390602920 2024-11-23T19:36:43,397 WARN [RS:0;387b213c044a:44195 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T19:36:43,397 INFO [RS:0;387b213c044a:44195 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:36:43,397 DEBUG [RS:0;387b213c044a:44195 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920 2024-11-23T19:36:43,397 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [387b213c044a,44195,1732390602920] 2024-11-23T19:36:43,401 INFO [RS:0;387b213c044a:44195 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T19:36:43,404 INFO [RS:0;387b213c044a:44195 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T19:36:43,406 INFO [RS:0;387b213c044a:44195 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T19:36:43,406 INFO [RS:0;387b213c044a:44195 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
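The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. Under default settings these figures follow from the region server heap: the limit is the heap times hbase.regionserver.global.memstore.size (default 0.4), and the low mark is the limit times hbase.regionserver.global.memstore.size.lower.limit (default 0.95), which is consistent with 880 M * 0.95 = 836 M and implies a heap of roughly 2.2 GB for this JVM. The sketch below reproduces that arithmetic; the heap size and both fractions are assumed defaults, not values present in this log.

    public class MemStoreLimitSketch {
        public static void main(String[] args) {
            // Assumed defaults: hbase.regionserver.global.memstore.size = 0.4,
            // hbase.regionserver.global.memstore.size.lower.limit = 0.95.
            long maxHeapBytes = 2200L * 1024 * 1024;   // inferred heap for this run (assumption)
            double globalMemStoreFraction = 0.4;
            double lowerLimitFraction = 0.95;

            long globalLimit = (long) (maxHeapBytes * globalMemStoreFraction);  // ~880 MB
            long lowWaterMark = (long) (globalLimit * lowerLimitFraction);      // ~836 MB

            System.out.printf("limit=%d MB, lowMark=%d MB%n",
                globalLimit / (1024 * 1024), lowWaterMark / (1024 * 1024));
        }
    }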
2024-11-23T19:36:43,407 INFO [RS:0;387b213c044a:44195 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T19:36:43,408 INFO [RS:0;387b213c044a:44195 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T19:36:43,408 INFO [RS:0;387b213c044a:44195 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:43,408 DEBUG [RS:0;387b213c044a:44195 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:43,408 DEBUG [RS:0;387b213c044a:44195 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:43,408 DEBUG [RS:0;387b213c044a:44195 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:43,408 DEBUG [RS:0;387b213c044a:44195 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:43,408 DEBUG [RS:0;387b213c044a:44195 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:43,409 DEBUG [RS:0;387b213c044a:44195 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/387b213c044a:0, corePoolSize=2, maxPoolSize=2 2024-11-23T19:36:43,409 DEBUG [RS:0;387b213c044a:44195 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:43,409 DEBUG [RS:0;387b213c044a:44195 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:43,409 DEBUG [RS:0;387b213c044a:44195 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:43,409 DEBUG [RS:0;387b213c044a:44195 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:43,409 DEBUG [RS:0;387b213c044a:44195 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:43,409 DEBUG [RS:0;387b213c044a:44195 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:43,409 DEBUG [RS:0;387b213c044a:44195 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/387b213c044a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T19:36:43,409 DEBUG [RS:0;387b213c044a:44195 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T19:36:43,411 INFO [RS:0;387b213c044a:44195 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-23T19:36:43,411 INFO [RS:0;387b213c044a:44195 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:43,411 INFO [RS:0;387b213c044a:44195 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:43,411 INFO [RS:0;387b213c044a:44195 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:43,411 INFO [RS:0;387b213c044a:44195 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:43,411 INFO [RS:0;387b213c044a:44195 {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,44195,1732390602920-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T19:36:43,436 INFO [RS:0;387b213c044a:44195 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T19:36:43,436 INFO [RS:0;387b213c044a:44195 {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,44195,1732390602920-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:43,437 INFO [RS:0;387b213c044a:44195 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:43,437 INFO [RS:0;387b213c044a:44195 {}] regionserver.Replication(171): 387b213c044a,44195,1732390602920 started 2024-11-23T19:36:43,470 WARN [387b213c044a:35305 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-23T19:36:43,475 INFO [RS:0;387b213c044a:44195 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T19:36:43,475 INFO [RS:0;387b213c044a:44195 {}] regionserver.HRegionServer(1482): Serving as 387b213c044a,44195,1732390602920, RpcServer on 387b213c044a/172.17.0.3:44195, sessionid=0x1016932c3900001 2024-11-23T19:36:43,475 DEBUG [RS:0;387b213c044a:44195 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T19:36:43,475 DEBUG [RS:0;387b213c044a:44195 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 387b213c044a,44195,1732390602920 2024-11-23T19:36:43,475 DEBUG [RS:0;387b213c044a:44195 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '387b213c044a,44195,1732390602920' 2024-11-23T19:36:43,475 DEBUG [RS:0;387b213c044a:44195 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T19:36:43,476 DEBUG [RS:0;387b213c044a:44195 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T19:36:43,477 DEBUG [RS:0;387b213c044a:44195 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T19:36:43,477 DEBUG [RS:0;387b213c044a:44195 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T19:36:43,477 DEBUG [RS:0;387b213c044a:44195 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 387b213c044a,44195,1732390602920 2024-11-23T19:36:43,477 DEBUG [RS:0;387b213c044a:44195 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '387b213c044a,44195,1732390602920' 2024-11-23T19:36:43,477 DEBUG [RS:0;387b213c044a:44195 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T19:36:43,477 DEBUG [RS:0;387b213c044a:44195 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T19:36:43,478 DEBUG [RS:0;387b213c044a:44195 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T19:36:43,478 INFO [RS:0;387b213c044a:44195 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T19:36:43,478 INFO [RS:0;387b213c044a:44195 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
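The two quota lines above show that both the RPC quota manager and the space quota manager are disabled in this test run. Quota support is gated by hbase.quota.enabled, which defaults to false; a minimal sketch of flipping it in a Configuration object follows, purely as an illustration (in a real cluster the property would be set in hbase-site.xml on the servers, which this test does not do).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableQuotasSketch {
        public static void main(String[] args) {
            // hbase.quota.enabled defaults to false, hence the "Quota support disabled" lines.
            Configuration conf = HBaseConfiguration.create();
            conf.setBoolean("hbase.quota.enabled", true);
            System.out.println(conf.getBoolean("hbase.quota.enabled", false));
        }
    }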
2024-11-23T19:36:43,582 INFO [RS:0;387b213c044a:44195 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C44195%2C1732390602920, suffix=, logDir=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920, archiveDir=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/oldWALs, maxLogs=32 2024-11-23T19:36:43,583 INFO [RS:0;387b213c044a:44195 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C44195%2C1732390602920.1732390603583 2024-11-23T19:36:43,592 INFO [RS:0;387b213c044a:44195 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390603583 2024-11-23T19:36:43,596 DEBUG [RS:0;387b213c044a:44195 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41917:41917),(127.0.0.1/127.0.0.1:41595:41595)] 2024-11-23T19:36:43,720 DEBUG [387b213c044a:35305 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-23T19:36:43,721 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=387b213c044a,44195,1732390602920 2024-11-23T19:36:43,725 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 387b213c044a,44195,1732390602920, state=OPENING 2024-11-23T19:36:43,772 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T19:36:43,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:43,782 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:36:43,783 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T19:36:43,783 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=387b213c044a,44195,1732390602920}] 2024-11-23T19:36:43,783 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:36:43,784 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:36:43,937 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T19:36:43,938 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54481, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T19:36:43,943 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T19:36:43,943 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:36:43,945 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C44195%2C1732390602920.meta, suffix=.meta, logDir=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920, archiveDir=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/oldWALs, maxLogs=32 2024-11-23T19:36:43,946 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta 2024-11-23T19:36:43,953 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta 2024-11-23T19:36:43,956 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41917:41917),(127.0.0.1/127.0.0.1:41595:41595)] 2024-11-23T19:36:43,957 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T19:36:43,957 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T19:36:43,957 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T19:36:43,957 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
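Both WAL configuration entries (the region server WAL earlier and the meta WAL above) report blocksize=256 MB and rollsize=128 MB, with maxLogs=32 bounding how many un-archived WAL files may accumulate before flushes are forced. The roll size is normally derived from the block size via hbase.regionserver.logroll.multiplier, whose documented default is 0.5; that multiplier does not appear in this log and is assumed in the sketch below.

    public class WalRollSizeSketch {
        public static void main(String[] args) {
            // Assumption: rollsize = blocksize * hbase.regionserver.logroll.multiplier (default 0.5).
            long blockSize = 256L * 1024 * 1024;   // "blocksize=256 MB" from the WAL configuration lines
            double rollMultiplier = 0.5;           // documented default, not present in this log
            long rollSize = (long) (blockSize * rollMultiplier);
            System.out.println(rollSize / (1024 * 1024) + " MB");  // 128 MB, matching "rollsize=128 MB"
        }
    }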
2024-11-23T19:36:43,957 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T19:36:43,957 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:36:43,958 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T19:36:43,958 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T19:36:43,959 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T19:36:43,960 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T19:36:43,960 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:43,961 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:36:43,961 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T19:36:43,962 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T19:36:43,962 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:43,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:36:43,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T19:36:43,963 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T19:36:43,963 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:43,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:36:43,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T19:36:43,964 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T19:36:43,964 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:43,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
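The repeated CompactionConfiguration entries describe the selection parameters applied per column family by ExploringCompactionPolicy: between minFilesToCompact=3 and maxFilesToCompact=10 files are considered, and a candidate set is only "in ratio" if every file is no larger than ratio (1.200000 here, 5.000000 off-peak) times the combined size of the other files in the set. The fragment below is a simplified sketch of that ratio check, not the actual HBase implementation, and the file sizes are made up.

    import java.util.List;

    public class RatioCheckSketch {
        // Simplified "files in ratio" test used when picking compaction candidates:
        // every file must be at most ratio * (sum of the other files in the selection).
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Made-up sizes; with ratio 1.2 the 90 MB file is too large relative to the rest.
            System.out.println(filesInRatio(List.of(10L << 20, 12L << 20, 90L << 20), 1.2)); // false
            System.out.println(filesInRatio(List.of(10L << 20, 12L << 20, 15L << 20), 1.2)); // true
        }
    }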
2024-11-23T19:36:43,965 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T19:36:43,966 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740 2024-11-23T19:36:43,967 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740 2024-11-23T19:36:43,969 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T19:36:43,969 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T19:36:43,969 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T19:36:43,970 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T19:36:43,971 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=873276, jitterRate=0.11042861640453339}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T19:36:43,971 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T19:36:43,972 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732390603958Writing region info on filesystem at 1732390603958Initializing all the Stores at 1732390603959 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390603959Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390603959Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390603959Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390603959Cleaning up temporary data from old regions at 1732390603969 (+10 ms)Running coprocessor post-open hooks at 1732390603971 (+2 ms)Region opened successfully at 1732390603972 (+1 ms) 2024-11-23T19:36:43,973 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732390603936 2024-11-23T19:36:43,976 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T19:36:43,976 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T19:36:43,976 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=387b213c044a,44195,1732390602920 2024-11-23T19:36:43,977 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 387b213c044a,44195,1732390602920, state=OPEN 2024-11-23T19:36:44,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T19:36:44,012 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T19:36:44,012 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=387b213c044a,44195,1732390602920 2024-11-23T19:36:44,012 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:36:44,012 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:36:44,015 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T19:36:44,015 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=387b213c044a,44195,1732390602920 in 229 msec 2024-11-23T19:36:44,018 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T19:36:44,018 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 698 msec 2024-11-23T19:36:44,019 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:36:44,019 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-23T19:36:44,021 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T19:36:44,021 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=387b213c044a,44195,1732390602920, seqNum=-1] 2024-11-23T19:36:44,021 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T19:36:44,023 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56345, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T19:36:44,030 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 755 msec 2024-11-23T19:36:44,030 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732390604030, completionTime=-1 2024-11-23T19:36:44,031 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-23T19:36:44,031 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-23T19:36:44,032 INFO [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-23T19:36:44,032 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732390664032 2024-11-23T19:36:44,033 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732390724032 2024-11-23T19:36:44,033 INFO [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-23T19:36:44,033 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,35305,1732390602741-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:44,033 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,35305,1732390602741-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:44,033 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,35305,1732390602741-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:44,033 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-387b213c044a:35305, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T19:36:44,033 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:44,033 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:44,035 DEBUG [master/387b213c044a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T19:36:44,037 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.051sec 2024-11-23T19:36:44,037 INFO [master/387b213c044a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T19:36:44,037 INFO [master/387b213c044a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T19:36:44,037 INFO [master/387b213c044a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T19:36:44,037 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-23T19:36:44,037 INFO [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T19:36:44,037 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,35305,1732390602741-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T19:36:44,037 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,35305,1732390602741-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T19:36:44,039 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-23T19:36:44,039 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T19:36:44,039 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,35305,1732390602741-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T19:36:44,047 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1744a862, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:36:44,048 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 387b213c044a,35305,-1 for getting cluster id 2024-11-23T19:36:44,048 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T19:36:44,049 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4c999074-bcc7-48e1-be4b-fd986739057e' 2024-11-23T19:36:44,050 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T19:36:44,050 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4c999074-bcc7-48e1-be4b-fd986739057e" 2024-11-23T19:36:44,050 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ff0542e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:36:44,050 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [387b213c044a,35305,-1] 2024-11-23T19:36:44,051 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T19:36:44,051 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:36:44,053 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53486, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T19:36:44,054 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7982f2bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:36:44,054 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T19:36:44,056 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=387b213c044a,44195,1732390602920, seqNum=-1] 2024-11-23T19:36:44,056 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T19:36:44,058 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53334, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T19:36:44,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=387b213c044a,35305,1732390602741 2024-11-23T19:36:44,061 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:36:44,064 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-23T19:36:44,084 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/387b213c044a:0 server-side Connection retries=45 2024-11-23T19:36:44,084 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:36:44,084 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T19:36:44,084 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T19:36:44,084 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:36:44,085 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T19:36:44,085 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T19:36:44,085 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T19:36:44,085 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37383 2024-11-23T19:36:44,087 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37383 connecting to ZooKeeper ensemble=127.0.0.1:56596 2024-11-23T19:36:44,088 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:36:44,089 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:36:44,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:373830x0, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T19:36:44,113 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37383-0x1016932c3900002 connected 2024-11-23T19:36:44,113 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-23T19:36:44,113 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:37383-0x1016932c3900002, quorum=127.0.0.1:56596, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-23T19:36:44,114 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T19:36:44,115 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
2024-11-23T19:36:44,116 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:37383-0x1016932c3900002, quorum=127.0.0.1:56596, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T19:36:44,118 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37383-0x1016932c3900002, quorum=127.0.0.1:56596, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T19:36:44,119 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37383 2024-11-23T19:36:44,121 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37383 2024-11-23T19:36:44,121 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37383 2024-11-23T19:36:44,122 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37383 2024-11-23T19:36:44,124 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37383 2024-11-23T19:36:44,126 INFO [RS:1;387b213c044a:37383 {}] regionserver.HRegionServer(746): ClusterId : 4c999074-bcc7-48e1-be4b-fd986739057e 2024-11-23T19:36:44,126 DEBUG [RS:1;387b213c044a:37383 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T19:36:44,134 DEBUG [RS:1;387b213c044a:37383 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T19:36:44,134 DEBUG [RS:1;387b213c044a:37383 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T19:36:44,144 DEBUG [RS:1;387b213c044a:37383 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T19:36:44,145 DEBUG [RS:1;387b213c044a:37383 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bba5bd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=387b213c044a/172.17.0.3:0 2024-11-23T19:36:44,159 DEBUG [RS:1;387b213c044a:37383 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;387b213c044a:37383 2024-11-23T19:36:44,159 INFO [RS:1;387b213c044a:37383 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T19:36:44,159 INFO [RS:1;387b213c044a:37383 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T19:36:44,159 DEBUG [RS:1;387b213c044a:37383 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-23T19:36:44,160 INFO [RS:1;387b213c044a:37383 {}] regionserver.HRegionServer(2659): reportForDuty to master=387b213c044a,35305,1732390602741 with port=37383, startcode=1732390604084 2024-11-23T19:36:44,160 DEBUG [RS:1;387b213c044a:37383 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T19:36:44,162 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45907, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T19:36:44,162 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35305 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 387b213c044a,37383,1732390604084 2024-11-23T19:36:44,162 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35305 {}] master.ServerManager(517): Registering regionserver=387b213c044a,37383,1732390604084 2024-11-23T19:36:44,164 DEBUG [RS:1;387b213c044a:37383 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed 2024-11-23T19:36:44,164 DEBUG [RS:1;387b213c044a:37383 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39041 2024-11-23T19:36:44,164 DEBUG [RS:1;387b213c044a:37383 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T19:36:44,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T19:36:44,175 DEBUG [RS:1;387b213c044a:37383 {}] zookeeper.ZKUtil(111): regionserver:37383-0x1016932c3900002, quorum=127.0.0.1:56596, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/387b213c044a,37383,1732390604084 2024-11-23T19:36:44,176 WARN [RS:1;387b213c044a:37383 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T19:36:44,176 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [387b213c044a,37383,1732390604084] 2024-11-23T19:36:44,176 INFO [RS:1;387b213c044a:37383 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:36:44,176 DEBUG [RS:1;387b213c044a:37383 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084 2024-11-23T19:36:44,179 INFO [RS:1;387b213c044a:37383 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T19:36:44,182 INFO [RS:1;387b213c044a:37383 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T19:36:44,182 INFO [RS:1;387b213c044a:37383 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T19:36:44,182 INFO [RS:1;387b213c044a:37383 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-23T19:36:44,183 INFO [RS:1;387b213c044a:37383 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T19:36:44,183 INFO [RS:1;387b213c044a:37383 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T19:36:44,184 INFO [RS:1;387b213c044a:37383 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:44,184 DEBUG [RS:1;387b213c044a:37383 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:44,184 DEBUG [RS:1;387b213c044a:37383 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:44,184 DEBUG [RS:1;387b213c044a:37383 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:44,184 DEBUG [RS:1;387b213c044a:37383 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:44,184 DEBUG [RS:1;387b213c044a:37383 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:44,184 DEBUG [RS:1;387b213c044a:37383 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/387b213c044a:0, corePoolSize=2, maxPoolSize=2 2024-11-23T19:36:44,184 DEBUG [RS:1;387b213c044a:37383 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:44,184 DEBUG [RS:1;387b213c044a:37383 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:44,185 DEBUG [RS:1;387b213c044a:37383 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:44,185 DEBUG [RS:1;387b213c044a:37383 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:44,185 DEBUG [RS:1;387b213c044a:37383 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:44,185 DEBUG [RS:1;387b213c044a:37383 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:36:44,185 DEBUG [RS:1;387b213c044a:37383 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/387b213c044a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T19:36:44,185 DEBUG [RS:1;387b213c044a:37383 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T19:36:44,187 INFO [RS:1;387b213c044a:37383 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-23T19:36:44,187 INFO [RS:1;387b213c044a:37383 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:44,187 INFO [RS:1;387b213c044a:37383 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:44,187 INFO [RS:1;387b213c044a:37383 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:44,187 INFO [RS:1;387b213c044a:37383 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:44,187 INFO [RS:1;387b213c044a:37383 {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,37383,1732390604084-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T19:36:44,204 INFO [RS:1;387b213c044a:37383 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T19:36:44,204 INFO [RS:1;387b213c044a:37383 {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,37383,1732390604084-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:44,205 INFO [RS:1;387b213c044a:37383 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:44,205 INFO [RS:1;387b213c044a:37383 {}] regionserver.Replication(171): 387b213c044a,37383,1732390604084 started 2024-11-23T19:36:44,218 INFO [RS:1;387b213c044a:37383 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:36:44,218 INFO [RS:1;387b213c044a:37383 {}] regionserver.HRegionServer(1482): Serving as 387b213c044a,37383,1732390604084, RpcServer on 387b213c044a/172.17.0.3:37383, sessionid=0x1016932c3900002 2024-11-23T19:36:44,218 DEBUG [RS:1;387b213c044a:37383 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T19:36:44,218 DEBUG [RS:1;387b213c044a:37383 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 387b213c044a,37383,1732390604084 2024-11-23T19:36:44,218 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;387b213c044a:37383,5,FailOnTimeoutGroup] 2024-11-23T19:36:44,218 DEBUG [RS:1;387b213c044a:37383 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '387b213c044a,37383,1732390604084' 2024-11-23T19:36:44,218 DEBUG [RS:1;387b213c044a:37383 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T19:36:44,218 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-23T19:36:44,219 DEBUG [RS:1;387b213c044a:37383 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T19:36:44,219 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T19:36:44,219 DEBUG [RS:1;387b213c044a:37383 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T19:36:44,219 DEBUG [RS:1;387b213c044a:37383 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T19:36:44,219 DEBUG [RS:1;387b213c044a:37383 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
387b213c044a,37383,1732390604084 2024-11-23T19:36:44,219 DEBUG [RS:1;387b213c044a:37383 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '387b213c044a,37383,1732390604084' 2024-11-23T19:36:44,219 DEBUG [RS:1;387b213c044a:37383 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T19:36:44,220 DEBUG [RS:1;387b213c044a:37383 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T19:36:44,220 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 387b213c044a,35305,1732390602741 2024-11-23T19:36:44,220 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6c57f553 2024-11-23T19:36:44,220 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T19:36:44,220 DEBUG [RS:1;387b213c044a:37383 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T19:36:44,220 INFO [RS:1;387b213c044a:37383 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T19:36:44,221 INFO [RS:1;387b213c044a:37383 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T19:36:44,222 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53496, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T19:36:44,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35305 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-23T19:36:44,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35305 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-23T19:36:44,223 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35305 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T19:36:44,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35305 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-23T19:36:44,225 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T19:36:44,226 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:44,226 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35305 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-23T19:36:44,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35305 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T19:36:44,227 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T19:36:44,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34927 is added to blk_1073741835_1011 (size=393) 2024-11-23T19:36:44,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37185 is added to blk_1073741835_1011 (size=393) 2024-11-23T19:36:44,236 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8fb00675b5215e6debc026bc19386eeb, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed 2024-11-23T19:36:44,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34927 is added to blk_1073741836_1012 (size=76) 2024-11-23T19:36:44,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37185 is added to blk_1073741836_1012 (size=76) 2024-11-23T19:36:44,243 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:36:44,244 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 8fb00675b5215e6debc026bc19386eeb, disabling compactions & flushes 2024-11-23T19:36:44,244 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb. 2024-11-23T19:36:44,244 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb. 2024-11-23T19:36:44,244 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb. after waiting 0 ms 2024-11-23T19:36:44,244 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb. 2024-11-23T19:36:44,244 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb. 2024-11-23T19:36:44,244 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8fb00675b5215e6debc026bc19386eeb: Waiting for close lock at 1732390604243Disabling compacts and flushes for region at 1732390604243Disabling writes for close at 1732390604244 (+1 ms)Writing region close event to WAL at 1732390604244Closed at 1732390604244 2024-11-23T19:36:44,246 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T19:36:44,246 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732390604246"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732390604246"}]},"ts":"1732390604246"} 2024-11-23T19:36:44,249 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-23T19:36:44,251 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T19:36:44,251 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732390604251"}]},"ts":"1732390604251"} 2024-11-23T19:36:44,254 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-23T19:36:44,254 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8fb00675b5215e6debc026bc19386eeb, ASSIGN}] 2024-11-23T19:36:44,256 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8fb00675b5215e6debc026bc19386eeb, ASSIGN 2024-11-23T19:36:44,257 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8fb00675b5215e6debc026bc19386eeb, ASSIGN; state=OFFLINE, location=387b213c044a,44195,1732390602920; forceNewPlan=false, retain=false 2024-11-23T19:36:44,325 INFO [RS:1;387b213c044a:37383 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C37383%2C1732390604084, suffix=, logDir=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084, archiveDir=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/oldWALs, maxLogs=32 2024-11-23T19:36:44,327 INFO [RS:1;387b213c044a:37383 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C37383%2C1732390604084.1732390604327 2024-11-23T19:36:44,334 INFO [RS:1;387b213c044a:37383 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 2024-11-23T19:36:44,335 DEBUG [RS:1;387b213c044a:37383 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41917:41917),(127.0.0.1/127.0.0.1:41595:41595)] 2024-11-23T19:36:44,409 INFO [387b213c044a:35305 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-23T19:36:44,409 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8fb00675b5215e6debc026bc19386eeb, regionState=OPENING, regionLocation=387b213c044a,44195,1732390602920 2024-11-23T19:36:44,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8fb00675b5215e6debc026bc19386eeb, ASSIGN because future has completed 2024-11-23T19:36:44,417 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8fb00675b5215e6debc026bc19386eeb, server=387b213c044a,44195,1732390602920}] 2024-11-23T19:36:44,582 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb. 2024-11-23T19:36:44,582 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8fb00675b5215e6debc026bc19386eeb, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb.', STARTKEY => '', ENDKEY => ''} 2024-11-23T19:36:44,583 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:36:44,583 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:36:44,583 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:36:44,583 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:36:44,585 INFO [StoreOpener-8fb00675b5215e6debc026bc19386eeb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:36:44,587 INFO [StoreOpener-8fb00675b5215e6debc026bc19386eeb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8fb00675b5215e6debc026bc19386eeb columnFamilyName info 2024-11-23T19:36:44,588 DEBUG [StoreOpener-8fb00675b5215e6debc026bc19386eeb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:36:44,588 INFO [StoreOpener-8fb00675b5215e6debc026bc19386eeb-1 {}] regionserver.HStore(327): Store=8fb00675b5215e6debc026bc19386eeb/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:36:44,588 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:36:44,590 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:36:44,590 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:36:44,591 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:36:44,591 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:36:44,594 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:36:44,598 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T19:36:44,599 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8fb00675b5215e6debc026bc19386eeb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=699870, jitterRate=-0.11007049679756165}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T19:36:44,599 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:36:44,600 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8fb00675b5215e6debc026bc19386eeb: Running coprocessor pre-open hook at 1732390604583Writing region info on filesystem at 1732390604583Initializing all the Stores at 1732390604585 (+2 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390604585Cleaning up temporary data from old regions at 1732390604591 (+6 ms)Running coprocessor post-open hooks at 1732390604599 (+8 ms)Region opened successfully at 1732390604600 (+1 ms) 2024-11-23T19:36:44,601 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb., pid=6, masterSystemTime=1732390604571 2024-11-23T19:36:44,605 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb. 2024-11-23T19:36:44,605 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb. 2024-11-23T19:36:44,606 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8fb00675b5215e6debc026bc19386eeb, regionState=OPEN, openSeqNum=2, regionLocation=387b213c044a,44195,1732390602920 2024-11-23T19:36:44,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8fb00675b5215e6debc026bc19386eeb, server=387b213c044a,44195,1732390602920 because future has completed 2024-11-23T19:36:44,611 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-23T19:36:44,611 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8fb00675b5215e6debc026bc19386eeb, server=387b213c044a,44195,1732390602920 in 192 msec 2024-11-23T19:36:44,614 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-23T19:36:44,614 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8fb00675b5215e6debc026bc19386eeb, ASSIGN in 357 msec 2024-11-23T19:36:44,615 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T19:36:44,615 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732390604615"}]},"ts":"1732390604615"} 2024-11-23T19:36:44,617 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-23T19:36:44,619 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T19:36:44,621 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 396 msec 2024-11-23T19:36:45,005 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T19:36:45,011 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:36:45,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:36:45,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:36:45,040 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T19:36:45,040 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-23T19:36:45,041 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-23T19:36:45,041 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-23T19:36:45,042 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T19:36:45,042 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-23T19:36:45,042 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-23T19:36:45,042 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-23T19:36:49,401 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-23T19:36:50,089 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T19:36:50,095 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:36:50,118 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:36:50,119 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:36:54,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35305 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T19:36:54,261 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-23T19:36:54,261 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-23T19:36:54,270 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-23T19:36:54,270 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb. 2024-11-23T19:36:54,285 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:36:54,289 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:36:54,290 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:36:54,290 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:36:54,290 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T19:36:54,291 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6030d470{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:36:54,291 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45d50f98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:36:54,383 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@495a6aea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/java.io.tmpdir/jetty-localhost-37135-hadoop-hdfs-3_4_1-tests_jar-_-any-15314638938516220697/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:36:54,384 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5bf227cc{HTTP/1.1, (http/1.1)}{localhost:37135} 2024-11-23T19:36:54,384 INFO [Time-limited test {}] server.Server(415): Started @126915ms 2024-11-23T19:36:54,385 WARN 
[Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:36:54,413 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:36:54,416 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:36:54,417 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:36:54,417 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:36:54,417 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T19:36:54,418 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ca82099{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:36:54,418 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ed35b1e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:36:54,514 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1196c8fc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/java.io.tmpdir/jetty-localhost-35849-hadoop-hdfs-3_4_1-tests_jar-_-any-17121182714981823476/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:36:54,514 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5580c33e{HTTP/1.1, (http/1.1)}{localhost:35849} 2024-11-23T19:36:54,514 INFO [Time-limited test {}] server.Server(415): Started @127045ms 2024-11-23T19:36:54,516 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:36:54,549 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:36:54,553 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:36:54,554 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:36:54,554 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:36:54,554 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T19:36:54,555 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fe58b15{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:36:54,555 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5455501c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:36:54,648 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5e08dd81{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/java.io.tmpdir/jetty-localhost-46605-hadoop-hdfs-3_4_1-tests_jar-_-any-886217609896636227/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:36:54,649 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@178f342a{HTTP/1.1, (http/1.1)}{localhost:46605} 2024-11-23T19:36:54,649 INFO [Time-limited test {}] server.Server(415): Started @127179ms 2024-11-23T19:36:54,650 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:36:55,889 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data5/current/BP-1109934133-172.17.0.3-1732390600576/current, will proceed with Du for space computation calculation, 2024-11-23T19:36:55,889 WARN [Thread-869 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data6/current/BP-1109934133-172.17.0.3-1732390600576/current, will proceed with Du for space computation calculation, 2024-11-23T19:36:55,905 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T19:36:55,908 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe6c2e2633ba6c522 with lease ID 0xb6b5517095c97df7: Processing first storage report for DS-dd5d42de-ca18-47c0-b42c-61ba0248e282 from datanode DatanodeRegistration(127.0.0.1:35089, datanodeUuid=416fc751-7f5c-4329-80a9-3fedd342f130, infoPort=38423, infoSecurePort=0, ipcPort=44621, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576) 2024-11-23T19:36:55,908 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe6c2e2633ba6c522 with lease ID 0xb6b5517095c97df7: from storage DS-dd5d42de-ca18-47c0-b42c-61ba0248e282 node DatanodeRegistration(127.0.0.1:35089, datanodeUuid=416fc751-7f5c-4329-80a9-3fedd342f130, infoPort=38423, infoSecurePort=0, ipcPort=44621, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:36:55,908 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe6c2e2633ba6c522 with lease ID 0xb6b5517095c97df7: Processing first storage report for DS-fe1f6561-5a81-46d1-92c3-8b9387101ca5 from datanode DatanodeRegistration(127.0.0.1:35089, datanodeUuid=416fc751-7f5c-4329-80a9-3fedd342f130, infoPort=38423, infoSecurePort=0, ipcPort=44621, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576) 2024-11-23T19:36:55,908 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe6c2e2633ba6c522 with lease ID 0xb6b5517095c97df7: from storage DS-fe1f6561-5a81-46d1-92c3-8b9387101ca5 node DatanodeRegistration(127.0.0.1:35089, datanodeUuid=416fc751-7f5c-4329-80a9-3fedd342f130, infoPort=38423, infoSecurePort=0, ipcPort=44621, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:36:56,001 WARN [Thread-879 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data7/current/BP-1109934133-172.17.0.3-1732390600576/current, will proceed with Du for space computation calculation, 2024-11-23T19:36:56,001 WARN [Thread-880 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data8/current/BP-1109934133-172.17.0.3-1732390600576/current, will proceed with Du for space computation calculation, 2024-11-23T19:36:56,025 WARN [Thread-831 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T19:36:56,027 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3ac79d37cee392af with lease ID 0xb6b5517095c97df8: Processing first storage report for DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3 from datanode DatanodeRegistration(127.0.0.1:38929, datanodeUuid=1adfc5ba-6de6-4a68-886f-c8eff82e6491, infoPort=37673, infoSecurePort=0, ipcPort=46759, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576) 2024-11-23T19:36:56,027 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ac79d37cee392af with lease ID 0xb6b5517095c97df8: from storage DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3 node DatanodeRegistration(127.0.0.1:38929, datanodeUuid=1adfc5ba-6de6-4a68-886f-c8eff82e6491, infoPort=37673, infoSecurePort=0, ipcPort=46759, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:36:56,027 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3ac79d37cee392af with lease ID 0xb6b5517095c97df8: Processing first storage report for DS-487256f6-5c91-43f3-bc04-8d6b45cb9599 from datanode DatanodeRegistration(127.0.0.1:38929, datanodeUuid=1adfc5ba-6de6-4a68-886f-c8eff82e6491, infoPort=37673, infoSecurePort=0, ipcPort=46759, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576) 2024-11-23T19:36:56,027 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ac79d37cee392af with lease ID 0xb6b5517095c97df8: from storage DS-487256f6-5c91-43f3-bc04-8d6b45cb9599 node DatanodeRegistration(127.0.0.1:38929, datanodeUuid=1adfc5ba-6de6-4a68-886f-c8eff82e6491, infoPort=37673, infoSecurePort=0, ipcPort=46759, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:36:56,079 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9/current/BP-1109934133-172.17.0.3-1732390600576/current, will proceed with Du for space computation calculation, 2024-11-23T19:36:56,079 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10/current/BP-1109934133-172.17.0.3-1732390600576/current, will proceed with Du for space computation calculation, 2024-11-23T19:36:56,097 WARN [Thread-853 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T19:36:56,099 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa67d7814999ad549 with lease ID 0xb6b5517095c97df9: Processing first storage report for DS-00e683c2-c80b-4a3b-8ef7-34874f26bead from datanode DatanodeRegistration(127.0.0.1:37691, datanodeUuid=ccb28add-2541-4416-9a4c-faad8a4c47f0, infoPort=38533, infoSecurePort=0, ipcPort=35795, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576) 2024-11-23T19:36:56,099 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa67d7814999ad549 with lease ID 0xb6b5517095c97df9: from storage DS-00e683c2-c80b-4a3b-8ef7-34874f26bead node DatanodeRegistration(127.0.0.1:37691, datanodeUuid=ccb28add-2541-4416-9a4c-faad8a4c47f0, infoPort=38533, infoSecurePort=0, ipcPort=35795, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:36:56,099 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa67d7814999ad549 with lease ID 0xb6b5517095c97df9: Processing first storage report for DS-d709f40e-a23a-4dca-86cc-8c0bfa7facce from datanode DatanodeRegistration(127.0.0.1:37691, datanodeUuid=ccb28add-2541-4416-9a4c-faad8a4c47f0, infoPort=38533, infoSecurePort=0, ipcPort=35795, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576) 2024-11-23T19:36:56,099 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa67d7814999ad549 with lease ID 0xb6b5517095c97df9: from storage DS-d709f40e-a23a-4dca-86cc-8c0bfa7facce node DatanodeRegistration(127.0.0.1:37691, datanodeUuid=ccb28add-2541-4416-9a4c-faad8a4c47f0, infoPort=38533, infoSecurePort=0, ipcPort=35795, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:36:56,193 WARN [ResponseProcessor for block BP-1109934133-172.17.0.3-1732390600576:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1109934133-172.17.0.3-1732390600576:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-1109934133-172.17.0.3-1732390600576:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:36:56,193 WARN [ResponseProcessor for block BP-1109934133-172.17.0.3-1732390600576:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1109934133-172.17.0.3-1732390600576:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1109934133-172.17.0.3-1732390600576:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T19:36:56,193 WARN [ResponseProcessor for block BP-1109934133-172.17.0.3-1732390600576:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1109934133-172.17.0.3-1732390600576:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1109934133-172.17.0.3-1732390600576:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:36:56,193 WARN [ResponseProcessor for block BP-1109934133-172.17.0.3-1732390600576:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1109934133-172.17.0.3-1732390600576:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:36:56,195 WARN [DataStreamer for file /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390603583 block BP-1109934133-172.17.0.3-1732390600576:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK], DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]) is bad. 2024-11-23T19:36:56,195 WARN [DataStreamer for file /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 block BP-1109934133-172.17.0.3-1732390600576:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK], DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]) is bad. 2024-11-23T19:36:56,195 WARN [DataStreamer for file /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/WALs/387b213c044a,35305,1732390602741/387b213c044a%2C35305%2C1732390602741.1732390603059 block BP-1109934133-172.17.0.3-1732390600576:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK], DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]) is bad. 
2024-11-23T19:36:56,195 WARN [PacketResponder: BP-1109934133-172.17.0.3-1732390600576:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34927] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:36:56,196 WARN [DataStreamer for file /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta block BP-1109934133-172.17.0.3-1732390600576:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK], DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]) is bad. 2024-11-23T19:36:56,195 WARN [PacketResponder: BP-1109934133-172.17.0.3-1732390600576:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34927] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:36:56,195 WARN [PacketResponder: BP-1109934133-172.17.0.3-1732390600576:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34927] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:36:56,197 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-448445082_22 at /127.0.0.1:58644 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:37185:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58644 dst: /127.0.0.1:37185 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:36:56,199 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:58602 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37185:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58602 dst: /127.0.0.1:37185 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:36:56,199 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@597807df{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:36:56,198 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-448445082_22 at /127.0.0.1:33860 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:34927:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33860 dst: /127.0.0.1:34927 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:36:56,198 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:33828 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34927:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33828 dst: /127.0.0.1:34927 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:36:56,199 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_977124268_22 at /127.0.0.1:58570 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37185:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58570 dst: /127.0.0.1:37185 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:36:56,198 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:33830 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34927:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33830 dst: /127.0.0.1:34927 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:36:56,198 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:58590 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37185:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58590 dst: /127.0.0.1:37185 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T19:36:56,200 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@401bd933{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:36:56,200 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:36:56,197 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_977124268_22 at /127.0.0.1:33788 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34927:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33788 dst: /127.0.0.1:34927 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T19:36:56,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f7f19bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:36:56,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74fcfaad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/hadoop.log.dir/,STOPPED} 2024-11-23T19:36:56,202 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-23T19:36:56,202 WARN [BP-1109934133-172.17.0.3-1732390600576 heartbeating to localhost/127.0.0.1:39041 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:36:56,202 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:36:56,202 WARN [BP-1109934133-172.17.0.3-1732390600576 heartbeating to localhost/127.0.0.1:39041 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1109934133-172.17.0.3-1732390600576 (Datanode Uuid 7e48bb23-6279-4532-ae03-0fdf13f3b82f) service to localhost/127.0.0.1:39041 2024-11-23T19:36:56,202 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data3/current/BP-1109934133-172.17.0.3-1732390600576 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:36:56,203 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data4/current/BP-1109934133-172.17.0.3-1732390600576 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:36:56,203 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:36:56,203 WARN [DataStreamer for file /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 block BP-1109934133-172.17.0.3-1732390600576:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] 
at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:36:56,203 WARN [DataStreamer for file /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390603583 block BP-1109934133-172.17.0.3-1732390600576:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:36:56,203 WARN [DataStreamer for file /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/WALs/387b213c044a,35305,1732390602741/387b213c044a%2C35305%2C1732390602741.1732390603059 block BP-1109934133-172.17.0.3-1732390600576:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:36:56,208 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@6e4ede9d {}] datanode.DataXceiver(331): 127.0.0.1:37185:DataXceiver error processing unknown operation src: /127.0.0.1:41914 dst: /127.0.0.1:37185 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:36:56,208 WARN [DataStreamer for file /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta block BP-1109934133-172.17.0.3-1732390600576:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:36:56,209 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d327fd2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:36:56,210 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4c80aceb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:36:56,210 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:36:56,210 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2cb9bebc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:36:56,210 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68a89b56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/hadoop.log.dir/,STOPPED} 2024-11-23T19:36:56,211 WARN [BP-1109934133-172.17.0.3-1732390600576 heartbeating to localhost/127.0.0.1:39041 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:36:56,211 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T19:36:56,211 WARN [BP-1109934133-172.17.0.3-1732390600576 heartbeating to localhost/127.0.0.1:39041 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1109934133-172.17.0.3-1732390600576 (Datanode Uuid ff45096b-2949-4c72-b1a7-5f9fed6e7090) service to localhost/127.0.0.1:39041 2024-11-23T19:36:56,211 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:36:56,211 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data1/current/BP-1109934133-172.17.0.3-1732390600576 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:36:56,212 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data2/current/BP-1109934133-172.17.0.3-1732390600576 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:36:56,212 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:36:56,215 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb., hostname=387b213c044a,44195,1732390602920, seqNum=2] 2024-11-23T19:36:56,217 ERROR [FSHLog-0-hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed-prefix:387b213c044a,44195,1732390602920 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:36:56,217 WARN [FSHLog-0-hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed-prefix:387b213c044a,44195,1732390602920 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:36:56,217 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 387b213c044a%2C44195%2C1732390602920:(num 1732390603583) roll requested 2024-11-23T19:36:56,217 INFO [regionserver/387b213c044a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C44195%2C1732390602920.1732390616217 2024-11-23T19:36:56,223 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:56,224 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:56,224 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:56,224 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:56,224 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:36:56,224 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390603583 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390616217 2024-11-23T19:36:56,225 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:36:56,225 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
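The roll sequence above ("roll requested", then "Rolled WAL ... with entries=1, filesize=455 B; new WAL ...") is driven internally by the region server's log roller; the same operation can also be requested through the public client API. A minimal sketch, assuming a reachable cluster (the ServerName values are copied from the log purely for illustration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Region server name copied from the log above (host, port, start code).
          ServerName rs = ServerName.valueOf("387b213c044a", 44195, 1732390602920L);
          // Ask that region server to close its current WAL and open a new one,
          // the same action the log roller performs when it logs "roll requested".
          admin.rollWALWriter(rs);
        }
      }
    }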
2024-11-23T19:36:56,226 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-23T19:36:56,226 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-23T19:36:56,226 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390603583 2024-11-23T19:36:56,227 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37673:37673),(127.0.0.1/127.0.0.1:38423:38423)] 2024-11-23T19:36:56,227 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390603583 is not closed yet, will try archiving it next time 2024-11-23T19:36:56,229 WARN [IPC Server handler 2 on default port 39041 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390603583 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-23T19:36:56,232 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390603583 after 5ms 2024-11-23T19:36:57,038 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:36:58,187 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
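The RecoverLeaseFSUtils messages above ("Recover lease on dfs file ...", "Failed to recover lease, attempt=0 ... after 5ms") wrap standard HDFS lease recovery: start recovery, then poll until the NameNode reports the file closed; the "Lease recovery is in progress" warning is the in-between state. A simplified sketch of that pattern, with the NameNode address taken from the log and a placeholder file path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class RecoverLeaseSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:39041");  // NameNode address from the log above
        Path wal = new Path("/example/wal-to-recover");      // placeholder for the abandoned WAL file
        try (FileSystem fs = FileSystem.get(conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // recoverLease() returns true once the NameNode has closed the file; a false return
          // means recovery was only started, which matches "Lease recovery is in progress" above.
          boolean recovered = dfs.recoverLease(wal);
          while (!recovered) {
            Thread.sleep(1000L);
            recovered = dfs.recoverLease(wal);
          }
          System.out.println("Lease recovered for " + wal);
        }
      }
    }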
2024-11-23T19:36:58,227 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:36:58,228 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390616217 2024-11-23T19:36:58,229 WARN [ResponseProcessor for block BP-1109934133-172.17.0.3-1732390600576:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1109934133-172.17.0.3-1732390600576:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:36:58,230 WARN [DataStreamer for file /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390616217 block BP-1109934133-172.17.0.3-1732390600576:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK], DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK]) is bad. 2024-11-23T19:36:58,231 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56354 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:38929:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56354 dst: /127.0.0.1:38929 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:36:58,231 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:42826 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:35089:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42826 dst: /127.0.0.1:35089 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T19:36:58,290 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1196c8fc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:36:58,291 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5580c33e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:36:58,291 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:36:58,291 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ed35b1e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:36:58,291 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ca82099{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/hadoop.log.dir/,STOPPED} 2024-11-23T19:36:58,293 WARN [BP-1109934133-172.17.0.3-1732390600576 heartbeating to localhost/127.0.0.1:39041 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:36:58,293 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-23T19:36:58,293 WARN [BP-1109934133-172.17.0.3-1732390600576 heartbeating to localhost/127.0.0.1:39041 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1109934133-172.17.0.3-1732390600576 (Datanode Uuid 1adfc5ba-6de6-4a68-886f-c8eff82e6491) service to localhost/127.0.0.1:39041 2024-11-23T19:36:58,293 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:36:58,294 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data7/current/BP-1109934133-172.17.0.3-1732390600576 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:36:58,294 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data8/current/BP-1109934133-172.17.0.3-1732390600576 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:36:58,295 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:36:59,039 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:00,187 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:00,228 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:00,229 WARN [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]] 2024-11-23T19:37:00,229 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 387b213c044a%2C44195%2C1732390602920:(num 1732390616217) roll requested 2024-11-23T19:37:00,230 INFO [regionserver/387b213c044a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C44195%2C1732390602920.1732390620230 2024-11-23T19:37:00,234 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390603583 after 4008ms 2024-11-23T19:37:00,235 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:00,236 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK], DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]) is bad. 2024-11-23T19:37:00,236 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741839_1021 2024-11-23T19:37:00,239 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK] 2024-11-23T19:37:00,245 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:00,245 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:00,245 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:00,245 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:00,245 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:00,246 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390616217 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390620230 2024-11-23T19:37:00,246 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38533:38533),(127.0.0.1/127.0.0.1:38423:38423)] 2024-11-23T19:37:00,247 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390603583 is not closed yet, will try archiving it next time 2024-11-23T19:37:00,247 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390616217 is not closed yet, will try archiving it next time 2024-11-23T19:37:00,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35089 is added to blk_1073741838_1020 (size=2431) 2024-11-23T19:37:00,303 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T19:37:00,650 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390603583 is not closed yet, will try archiving it next time 2024-11-23T19:37:01,039 INFO 
[master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:02,188 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:02,247 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:02,310 WARN [ResponseProcessor for block BP-1109934133-172.17.0.3-1732390600576:blk_1073741840_1022 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1109934133-172.17.0.3-1732390600576:blk_1073741840_1022 java.io.IOException: Bad response ERROR for BP-1109934133-172.17.0.3-1732390600576:blk_1073741840_1022 from datanode DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
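The recurring "All datanodes [...] are bad. Aborting..." retries come from the DFS output stream giving up on a pipeline once every datanode it still knows about for the block has failed. How aggressively the client replaces failed datanodes before reaching that point is controlled by the dfs.client.block.write.replace-datanode-on-failure.* settings; a hedged sketch of setting them programmatically (the values are illustrative, not what this test actually configures):

    import org.apache.hadoop.conf.Configuration;

    public class PipelineFailureConfigSketch {
      public static Configuration build() {
        Configuration conf = new Configuration();
        // Let the client request a replacement datanode when one in the write pipeline fails.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT replaces nodes only for larger pipelines; ALWAYS and NEVER are the other policies.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // Keep writing to the surviving datanodes if no replacement can be found,
        // rather than failing the stream immediately.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }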
2024-11-23T19:37:02,311 WARN [DataStreamer for file /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390620230 block BP-1109934133-172.17.0.3-1732390600576:blk_1073741840_1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK], DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]) is bad. 2024-11-23T19:37:02,311 WARN [PacketResponder: BP-1109934133-172.17.0.3-1732390600576:blk_1073741840_1022, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35089] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:02,312 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:46526 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:37691:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46526 dst: /127.0.0.1:37691 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:02,313 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:42848 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:35089:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42848 dst: /127.0.0.1:35089 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
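The entries that follow (Jetty handlers stopped, the BPServiceActor ending its block pool service), together with the EOF and ClosedChannel errors just above, are the normal trace of a datanode being shut down while writers are still active, which is what a log-roll-on-datanode-death test needs to provoke. A minimal hedged sketch of doing that against a MiniDFSCluster (cluster sizing is illustrative, not this test's setup):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class StopDatanodeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        try {
          cluster.waitActive();
          // Stop the first datanode while clients still hold open streams; any active writer
          // (such as the WAL above) then hits pipeline errors and recovers onto the survivors.
          cluster.stopDataNode(0);
        } finally {
          cluster.shutdown();
        }
      }
    }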
2024-11-23T19:37:02,385 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@495a6aea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:37:02,386 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5bf227cc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:37:02,386 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:37:02,387 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45d50f98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:37:02,387 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6030d470{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/hadoop.log.dir/,STOPPED} 2024-11-23T19:37:02,388 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-23T19:37:02,388 WARN [BP-1109934133-172.17.0.3-1732390600576 heartbeating to localhost/127.0.0.1:39041 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:37:02,389 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:37:02,389 WARN [BP-1109934133-172.17.0.3-1732390600576 heartbeating to localhost/127.0.0.1:39041 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1109934133-172.17.0.3-1732390600576 (Datanode Uuid 416fc751-7f5c-4329-80a9-3fedd342f130) service to localhost/127.0.0.1:39041 2024-11-23T19:37:02,389 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data5/current/BP-1109934133-172.17.0.3-1732390600576 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:37:02,390 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data6/current/BP-1109934133-172.17.0.3-1732390600576 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:37:02,390 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:37:02,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44195 {}] regionserver.HRegion(8855): Flush requested on 8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:37:02,401 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8fb00675b5215e6debc026bc19386eeb 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T19:37:02,421 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/fe4e89ac20a74010be0e28de46fddcbc is 1080, key is row0002/info:/1732390618297/Put/seqid=0 2024-11-23T19:37:02,424 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1024 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34927 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:02,424 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55880 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741841_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10]'}, localName='127.0.0.1:37691', datanodeUuid='ccb28add-2541-4416-9a4c-faad8a4c47f0', xmitsInProgress=0}:Exception transferring block BP-1109934133-172.17.0.3-1732390600576:blk_1073741841_1024 to mirror 127.0.0.1:34927 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:02,424 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741841_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK], DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]) is bad. 
2024-11-23T19:37:02,424 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741841_1024 2024-11-23T19:37:02,424 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55880 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741841_1024] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T19:37:02,424 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55880 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741841_1024] {}] datanode.DataXceiver(331): 127.0.0.1:37691:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55880 dst: /127.0.0.1:37691 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:02,425 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK] 2024-11-23T19:37:02,426 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:02,426 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK], DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]) is bad. 
2024-11-23T19:37:02,426 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741842_1025 2024-11-23T19:37:02,427 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK] 2024-11-23T19:37:02,428 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:02,428 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK], DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]) is bad. 2024-11-23T19:37:02,428 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741843_1026 2024-11-23T19:37:02,429 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK] 2024-11-23T19:37:02,431 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38929 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T19:37:02,431 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55894 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10]'}, localName='127.0.0.1:37691', datanodeUuid='ccb28add-2541-4416-9a4c-faad8a4c47f0', xmitsInProgress=0}:Exception transferring block BP-1109934133-172.17.0.3-1732390600576:blk_1073741844_1027 to mirror 127.0.0.1:38929 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:02,431 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK], DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK]) is bad. 2024-11-23T19:37:02,431 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55894 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T19:37:02,431 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741844_1027 2024-11-23T19:37:02,431 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55894 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:37691:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55894 dst: /127.0.0.1:37691 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:02,431 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK] 2024-11-23T19:37:02,432 WARN [IPC Server handler 1 on default port 39041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T19:37:02,432 WARN [IPC Server handler 1 on default port 39041 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T19:37:02,432 WARN [IPC Server handler 1 on default port 39041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T19:37:02,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741845_1028 (size=10347) 2024-11-23T19:37:02,836 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/fe4e89ac20a74010be0e28de46fddcbc 2024-11-23T19:37:02,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/fe4e89ac20a74010be0e28de46fddcbc as hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/fe4e89ac20a74010be0e28de46fddcbc 2024-11-23T19:37:02,850 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/fe4e89ac20a74010be0e28de46fddcbc, entries=5, sequenceid=11, filesize=10.1 K 2024-11-23T19:37:02,851 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 8fb00675b5215e6debc026bc19386eeb in 450ms, sequenceid=11, compaction requested=false 2024-11-23T19:37:02,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8fb00675b5215e6debc026bc19386eeb: 2024-11-23T19:37:03,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44195 {}] regionserver.HRegion(8855): Flush requested on 8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:37:03,034 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8fb00675b5215e6debc026bc19386eeb 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-23T19:37:03,040 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:03,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/df347be6789d447e8ba0cbe7760085e2 is 1080, key is row0007/info:/1732390622402/Put/seqid=0 2024-11-23T19:37:03,043 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37185 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T19:37:03,043 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55922 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10]'}, localName='127.0.0.1:37691', datanodeUuid='ccb28add-2541-4416-9a4c-faad8a4c47f0', xmitsInProgress=0}:Exception transferring block BP-1109934133-172.17.0.3-1732390600576:blk_1073741846_1029 to mirror 127.0.0.1:37185 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:03,043 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK], DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]) is bad. 2024-11-23T19:37:03,043 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741846_1029 2024-11-23T19:37:03,043 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55922 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T19:37:03,043 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55922 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:37691:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55922 dst: /127.0.0.1:37691 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:03,044 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK] 2024-11-23T19:37:03,045 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:03,046 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK], DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]) is bad. 2024-11-23T19:37:03,046 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741847_1030 2024-11-23T19:37:03,046 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK] 2024-11-23T19:37:03,047 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:03,047 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK], DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK]) is bad. 2024-11-23T19:37:03,048 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741848_1031 2024-11-23T19:37:03,048 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK] 2024-11-23T19:37:03,050 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35089 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:03,050 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55936 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10]'}, localName='127.0.0.1:37691', datanodeUuid='ccb28add-2541-4416-9a4c-faad8a4c47f0', xmitsInProgress=0}:Exception transferring block BP-1109934133-172.17.0.3-1732390600576:blk_1073741849_1032 to mirror 127.0.0.1:35089 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:03,050 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK], DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]) is bad. 2024-11-23T19:37:03,050 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741849_1032 2024-11-23T19:37:03,051 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55936 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T19:37:03,051 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55936 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:37691:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55936 dst: /127.0.0.1:37691 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T19:37:03,051 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK] 2024-11-23T19:37:03,052 WARN [IPC Server handler 1 on default port 39041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T19:37:03,052 WARN [IPC Server handler 1 on default port 39041 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T19:37:03,052 WARN [IPC Server handler 1 on default port 39041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T19:37:03,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741850_1033 (size=12506) 2024-11-23T19:37:03,457 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/df347be6789d447e8ba0cbe7760085e2 2024-11-23T19:37:03,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/df347be6789d447e8ba0cbe7760085e2 as hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/df347be6789d447e8ba0cbe7760085e2 2024-11-23T19:37:03,478 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/df347be6789d447e8ba0cbe7760085e2, entries=7, sequenceid=24, filesize=12.2 K 2024-11-23T19:37:03,479 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 8fb00675b5215e6debc026bc19386eeb in 446ms, sequenceid=24, compaction requested=false 2024-11-23T19:37:03,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8fb00675b5215e6debc026bc19386eeb: 2024-11-23T19:37:03,479 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-23T19:37:03,479 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:37:03,479 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/df347be6789d447e8ba0cbe7760085e2 because midkey is the same as first or last row 2024-11-23T19:37:04,188 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:04,248 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:04,248 WARN [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK]] 2024-11-23T19:37:04,249 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 387b213c044a%2C44195%2C1732390602920:(num 1732390620230) roll requested 2024-11-23T19:37:04,250 INFO [regionserver/387b213c044a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C44195%2C1732390602920.1732390624249 2024-11-23T19:37:04,257 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37185 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:04,257 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55958 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10]'}, localName='127.0.0.1:37691', datanodeUuid='ccb28add-2541-4416-9a4c-faad8a4c47f0', xmitsInProgress=0}:Exception transferring block BP-1109934133-172.17.0.3-1732390600576:blk_1073741851_1034 to mirror 127.0.0.1:37185 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:04,257 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK], DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]) is bad. 2024-11-23T19:37:04,257 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741851_1034 2024-11-23T19:37:04,257 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55958 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-23T19:37:04,257 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55958 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:37691:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55958 dst: /127.0.0.1:37691 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:04,258 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK] 2024-11-23T19:37:04,259 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:04,259 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK], DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]) is bad. 2024-11-23T19:37:04,259 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741852_1035 2024-11-23T19:37:04,260 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK] 2024-11-23T19:37:04,262 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35089 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:04,262 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55962 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741853_1036] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10]'}, localName='127.0.0.1:37691', datanodeUuid='ccb28add-2541-4416-9a4c-faad8a4c47f0', xmitsInProgress=0}:Exception transferring block BP-1109934133-172.17.0.3-1732390600576:blk_1073741853_1036 to mirror 127.0.0.1:35089 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:04,262 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK], DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]) is bad. 2024-11-23T19:37:04,262 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741853_1036 2024-11-23T19:37:04,262 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55962 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741853_1036] {}] datanode.BlockReceiver(316): Block 1073741853 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-23T19:37:04,262 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55962 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741853_1036] {}] datanode.DataXceiver(331): 127.0.0.1:37691:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55962 dst: /127.0.0.1:37691 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:04,263 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK] 2024-11-23T19:37:04,265 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38929 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:04,265 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55964 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10]'}, localName='127.0.0.1:37691', datanodeUuid='ccb28add-2541-4416-9a4c-faad8a4c47f0', xmitsInProgress=0}:Exception transferring block BP-1109934133-172.17.0.3-1732390600576:blk_1073741854_1037 to mirror 127.0.0.1:38929 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:04,265 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK], DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK]) is bad. 2024-11-23T19:37:04,265 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741854_1037 2024-11-23T19:37:04,265 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55964 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-23T19:37:04,265 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55964 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:37691:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55964 dst: /127.0.0.1:37691 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T19:37:04,266 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK] 2024-11-23T19:37:04,266 WARN [IPC Server handler 4 on default port 39041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T19:37:04,266 WARN [IPC Server handler 4 on default port 39041 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T19:37:04,266 WARN [IPC Server handler 4 on default port 39041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T19:37:04,269 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:04,269 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:04,269 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:04,269 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:04,269 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:04,269 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390620230 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390624249 2024-11-23T19:37:04,270 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38533:38533)] 2024-11-23T19:37:04,270 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390603583 is not closed yet, will try archiving it next time 2024-11-23T19:37:04,270 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390620230 is not closed yet, will try archiving it next time 2024-11-23T19:37:04,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741840_1023 (size=25992) 2024-11-23T19:37:04,272 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390616217 to hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/oldWALs/387b213c044a%2C44195%2C1732390602920.1732390616217 2024-11-23T19:37:04,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44195 {}] regionserver.HRegion(8855): Flush requested on 8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:37:04,466 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8fb00675b5215e6debc026bc19386eeb 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-23T19:37:04,474 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/15c28c2e1fef46f69b102de0709bf673 is 1079, key is tmprow/info:/1732390624463/Put/seqid=0 2024-11-23T19:37:04,477 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34927 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:04,477 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55982 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10]'}, localName='127.0.0.1:37691', datanodeUuid='ccb28add-2541-4416-9a4c-faad8a4c47f0', xmitsInProgress=0}:Exception transferring block BP-1109934133-172.17.0.3-1732390600576:blk_1073741856_1039 to mirror 127.0.0.1:34927 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:04,477 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK], DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]) is bad. 2024-11-23T19:37:04,477 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741856_1039 2024-11-23T19:37:04,477 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55982 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T19:37:04,478 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55982 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:37691:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55982 dst: /127.0.0.1:37691 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:04,478 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK] 2024-11-23T19:37:04,481 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35089 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:04,481 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55998 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10]'}, localName='127.0.0.1:37691', datanodeUuid='ccb28add-2541-4416-9a4c-faad8a4c47f0', xmitsInProgress=0}:Exception transferring block BP-1109934133-172.17.0.3-1732390600576:blk_1073741857_1040 to mirror 127.0.0.1:35089 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:04,481 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK], DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]) is bad. 2024-11-23T19:37:04,481 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741857_1040 2024-11-23T19:37:04,482 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55998 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T19:37:04,482 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:55998 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:37691:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55998 dst: /127.0.0.1:37691 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:04,482 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK] 2024-11-23T19:37:04,485 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37185 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:04,485 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56000 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10]'}, localName='127.0.0.1:37691', datanodeUuid='ccb28add-2541-4416-9a4c-faad8a4c47f0', xmitsInProgress=0}:Exception transferring block BP-1109934133-172.17.0.3-1732390600576:blk_1073741858_1041 to mirror 127.0.0.1:37185 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:04,485 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK], DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]) is bad. 2024-11-23T19:37:04,485 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741858_1041 2024-11-23T19:37:04,485 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56000 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T19:37:04,486 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56000 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:37691:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56000 dst: /127.0.0.1:37691 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:04,486 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK] 2024-11-23T19:37:04,488 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:04,488 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK], DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK]) is bad. 2024-11-23T19:37:04,488 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741859_1042 2024-11-23T19:37:04,489 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK] 2024-11-23T19:37:04,489 WARN [IPC Server handler 0 on default port 39041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T19:37:04,489 WARN [IPC Server handler 0 on default port 39041 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T19:37:04,489 WARN [IPC Server handler 0 on default port 39041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T19:37:04,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741860_1043 (size=6027) 2024-11-23T19:37:04,673 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390603583 is not closed yet, will try archiving it next time 2024-11-23T19:37:04,895 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/15c28c2e1fef46f69b102de0709bf673 2024-11-23T19:37:04,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/15c28c2e1fef46f69b102de0709bf673 as 
hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/15c28c2e1fef46f69b102de0709bf673 2024-11-23T19:37:04,919 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/15c28c2e1fef46f69b102de0709bf673, entries=1, sequenceid=34, filesize=5.9 K 2024-11-23T19:37:04,921 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 8fb00675b5215e6debc026bc19386eeb in 454ms, sequenceid=34, compaction requested=true 2024-11-23T19:37:04,921 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8fb00675b5215e6debc026bc19386eeb: 2024-11-23T19:37:04,921 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-23T19:37:04,921 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:37:04,921 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/df347be6789d447e8ba0cbe7760085e2 because midkey is the same as first or last row 2024-11-23T19:37:04,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8fb00675b5215e6debc026bc19386eeb:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T19:37:04,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:37:04,922 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T19:37:04,923 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T19:37:04,924 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.HStore(1541): 8fb00675b5215e6debc026bc19386eeb/info is initiating minor compaction (all files) 2024-11-23T19:37:04,924 INFO [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8fb00675b5215e6debc026bc19386eeb/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb. 
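The flush and compaction bookkeeping above shows the split-policy check in action: ConstantSizeRegionSplitPolicy reports sumSize=28.2 K against sizeToCheck=16.0 K, IncreasingToUpperBoundRegionSplitPolicy notes a single region for the table, and StoreUtils refuses to split because the midkey equals the first or last row. Below is a minimal sketch of the threshold arithmetic those lines imply, assuming the documented IncreasingToUpperBoundRegionSplitPolicy behaviour (threshold roughly min(max file size, initial size × regionCount³)); the constants are illustrative, not read from this test's configuration.

```java
// Illustrative only: approximates the sizeToCheck arithmetic implied by the
// IncreasingToUpperBoundRegionSplitPolicy lines above. The byte values are
// hypothetical test settings, not taken from this run's configuration.
public class SplitThresholdSketch {
    static long sizeToCheck(long desiredMaxFileSize, long initialSize, int regionsWithCommonTable) {
        // The threshold grows cubically with the number of regions of the table
        // hosted on this region server, capped by the configured max file size.
        long cubed = initialSize
                * regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
        return Math.min(desiredMaxFileSize, cubed);
    }

    public static void main(String[] args) {
        long desiredMaxFileSize = 1024L * 1024 * 1024; // e.g. hbase.hregion.max.filesize
        long initialSize = 16 * 1024;                  // e.g. a small flush-derived size in a test
        int regions = 1;                               // regionsWithCommonTable=1 in the log
        // With a single region this reproduces the 16.0 K sizeToCheck reported above.
        System.out.println(sizeToCheck(desiredMaxFileSize, initialSize, regions));
    }
}
```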
2024-11-23T19:37:04,924 INFO [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/fe4e89ac20a74010be0e28de46fddcbc, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/df347be6789d447e8ba0cbe7760085e2, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/15c28c2e1fef46f69b102de0709bf673] into tmpdir=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp, totalSize=28.2 K 2024-11-23T19:37:04,924 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] compactions.Compactor(225): Compacting fe4e89ac20a74010be0e28de46fddcbc, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732390618297 2024-11-23T19:37:04,925 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] compactions.Compactor(225): Compacting df347be6789d447e8ba0cbe7760085e2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732390622402 2024-11-23T19:37:04,925 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] compactions.Compactor(225): Compacting 15c28c2e1fef46f69b102de0709bf673, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732390624463 2024-11-23T19:37:04,937 INFO [RS:0;387b213c044a:44195-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8fb00675b5215e6debc026bc19386eeb#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T19:37:04,938 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/c41764b6f1f04655be3589a1f18d6351 is 1080, key is row0002/info:/1732390618297/Put/seqid=0 2024-11-23T19:37:04,939 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
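The DataStreamer warnings above repeat a single pattern: createBlockOutputStream gets Connection refused from a datanode that is no longer reachable, error recovery marks that node bad, the client abandons the block, excludes the node, and retries with a fresh block. A hedged sketch of the standard HDFS client settings that shape this behaviour follows; the values are illustrative, not taken from the test.

```java
// A minimal sketch (not this test's setup) of the client-side knobs that govern
// the pipeline-recovery behaviour logged above: when a datanode in a write
// pipeline is unreachable, the DFSClient abandons the block, excludes the node
// and retries, optionally asking the NameNode for replacement datanodes.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PipelineRecoveryConfigSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Keep writing as long as at least one replica in the pipeline survives,
        // instead of failing the write outright (values here are illustrative).
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

        try (FileSystem fs = FileSystem.get(conf)) {
            // Any write issued through this FileSystem uses the settings above.
            fs.create(new Path("/tmp/pipeline-recovery-sketch")).close();
        }
    }
}
```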
2024-11-23T19:37:04,940 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK], DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK]) is bad. 2024-11-23T19:37:04,940 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741861_1044 2024-11-23T19:37:04,940 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK] 2024-11-23T19:37:04,942 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:04,942 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK], DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]) is bad. 2024-11-23T19:37:04,942 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741862_1045 2024-11-23T19:37:04,942 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK] 2024-11-23T19:37:04,944 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:04,944 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK], DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]) is bad. 2024-11-23T19:37:04,944 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741863_1046 2024-11-23T19:37:04,944 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK] 2024-11-23T19:37:04,946 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37185 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:04,946 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56032 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741864_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10]'}, localName='127.0.0.1:37691', datanodeUuid='ccb28add-2541-4416-9a4c-faad8a4c47f0', xmitsInProgress=0}:Exception transferring block BP-1109934133-172.17.0.3-1732390600576:blk_1073741864_1047 to mirror 127.0.0.1:37185 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:04,947 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK], DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]) is bad. 2024-11-23T19:37:04,947 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741864_1047 2024-11-23T19:37:04,947 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56032 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741864_1047] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T19:37:04,947 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56032 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741864_1047] {}] datanode.DataXceiver(331): 127.0.0.1:37691:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56032 dst: /127.0.0.1:37691 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T19:37:04,947 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK] 2024-11-23T19:37:04,948 WARN [IPC Server handler 3 on default port 39041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T19:37:04,948 WARN [IPC Server handler 3 on default port 39041 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T19:37:04,948 WARN [IPC Server handler 3 on default port 39041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T19:37:04,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741865_1048 (size=17994) 2024-11-23T19:37:05,040 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:05,371 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/c41764b6f1f04655be3589a1f18d6351 as hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/c41764b6f1f04655be3589a1f18d6351 2024-11-23T19:37:05,379 INFO [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8fb00675b5215e6debc026bc19386eeb/info of 8fb00675b5215e6debc026bc19386eeb into c41764b6f1f04655be3589a1f18d6351(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
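The IPC-handler warnings above are the NameNode side of the same failure: with replication=2 requested but only one usable DISK storage left, BlockPlacementPolicyDefault cannot find a second target, and the WAL writer eventually gives up with "All datanodes ... are bad". A hypothetical check, not part of this test, that would surface that state from a client:

```java
// Hypothetical helper: report how many live datanodes the NameNode sees versus
// the replication a write is asking for, mirroring the "still in need of N to
// reach R" warnings above. 'fs' is assumed to point at the mini cluster.
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public class LiveDatanodeCheckSketch {
    static void reportPlacementCapacity(DistributedFileSystem fs, short replication) throws Exception {
        DatanodeInfo[] live = fs.getDataNodeStats(); // live datanodes as seen by the NameNode
        System.out.printf("live datanodes=%d, requested replication=%d%n", live.length, replication);
        if (live.length < replication) {
            System.out.printf("cannot place %d more replica(s)%n", replication - live.length);
        }
    }
}
```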
2024-11-23T19:37:05,379 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8fb00675b5215e6debc026bc19386eeb: 2024-11-23T19:37:05,379 INFO [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb., storeName=8fb00675b5215e6debc026bc19386eeb/info, priority=13, startTime=1732390624921; duration=0sec 2024-11-23T19:37:05,379 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-23T19:37:05,379 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:37:05,379 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/c41764b6f1f04655be3589a1f18d6351 because midkey is the same as first or last row 2024-11-23T19:37:05,379 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-23T19:37:05,379 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:37:05,379 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/c41764b6f1f04655be3589a1f18d6351 because midkey is the same as first or last row 2024-11-23T19:37:05,380 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-23T19:37:05,380 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:37:05,380 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/c41764b6f1f04655be3589a1f18d6351 because midkey is the same as first or last row 2024-11-23T19:37:05,380 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:37:05,380 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8fb00675b5215e6debc026bc19386eeb:info 2024-11-23T19:37:05,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44195 {}] regionserver.HRegion(8855): Flush requested on 8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:37:05,905 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8fb00675b5215e6debc026bc19386eeb 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-23T19:37:05,914 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/8e67143bbb15491ab05f8c438cba0955 is 1079, key is tmprow/info:/1732390625903/Put/seqid=0 2024-11-23T19:37:05,917 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:05,918 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK], DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]) is bad. 2024-11-23T19:37:05,918 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741866_1049 2024-11-23T19:37:05,919 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK] 2024-11-23T19:37:05,920 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T19:37:05,920 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK], DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]) is bad. 2024-11-23T19:37:05,920 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741867_1050 2024-11-23T19:37:05,921 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK] 2024-11-23T19:37:05,922 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:05,922 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK], DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK]) is bad. 2024-11-23T19:37:05,922 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741868_1051 2024-11-23T19:37:05,923 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK] 2024-11-23T19:37:05,925 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35089 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
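Each flush attempt above walks through the remaining datanodes, collecting Connection refused from every stopped node until only the local one is left. The HBase test presumably drives this by stopping datanodes underneath a live cluster; the sketch below shows that underlying pattern with Hadoop's MiniDFSCluster test utility (not the test's actual harness, and it requires the hadoop-hdfs test jar).

```java
// A minimal sketch of the "datanode death" pattern exercised by the test above:
// start a small HDFS cluster, open a write with replication 2, then stop a
// datanode so the open pipeline may hit "Connection refused" and must recover.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DatanodeDeathSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        try {
            cluster.waitActive();
            FSDataOutputStream out =
                cluster.getFileSystem().create(new Path("/wal-like-file"), (short) 2);
            out.write(new byte[4096]);
            out.hflush();            // the pipeline is now established
            cluster.stopDataNode(0); // stop a datanode; if it was in the pipeline, recovery kicks in
            out.write(new byte[4096]);
            out.close();             // succeeds on surviving nodes or aborts, as in the log above
        } finally {
            cluster.shutdown();
        }
    }
}
```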
2024-11-23T19:37:05,925 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56050 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741869_1052] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10]'}, localName='127.0.0.1:37691', datanodeUuid='ccb28add-2541-4416-9a4c-faad8a4c47f0', xmitsInProgress=0}:Exception transferring block BP-1109934133-172.17.0.3-1732390600576:blk_1073741869_1052 to mirror 127.0.0.1:35089 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:05,925 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK], DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]) is bad. 2024-11-23T19:37:05,925 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741869_1052 2024-11-23T19:37:05,925 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56050 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741869_1052] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T19:37:05,925 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56050 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741869_1052] {}] datanode.DataXceiver(331): 127.0.0.1:37691:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56050 dst: /127.0.0.1:37691 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:05,926 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK] 2024-11-23T19:37:05,927 WARN [IPC Server handler 2 on default port 39041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T19:37:05,927 WARN [IPC Server handler 2 on default port 39041 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T19:37:05,927 WARN [IPC Server handler 2 on default port 39041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T19:37:05,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741870_1053 (size=6027) 2024-11-23T19:37:06,119 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2be82041[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37691, datanodeUuid=ccb28add-2541-4416-9a4c-faad8a4c47f0, infoPort=38533, infoSecurePort=0, ipcPort=35795, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576):Failed to transfer BP-1109934133-172.17.0.3-1732390600576:blk_1073741850_1033 to 127.0.0.1:35089 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:06,119 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@50b67a8f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37691, datanodeUuid=ccb28add-2541-4416-9a4c-faad8a4c47f0, infoPort=38533, infoSecurePort=0, ipcPort=35795, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576):Failed to transfer BP-1109934133-172.17.0.3-1732390600576:blk_1073741845_1028 to 127.0.0.1:34927 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:06,189 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:06,272 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:06,273 WARN [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK]] 2024-11-23T19:37:06,273 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 387b213c044a%2C44195%2C1732390602920:(num 1732390624249) roll requested 2024-11-23T19:37:06,274 INFO [regionserver/387b213c044a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C44195%2C1732390602920.1732390626273 2024-11-23T19:37:06,279 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:06,279 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK], DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]) is bad. 2024-11-23T19:37:06,279 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741871_1054 2024-11-23T19:37:06,280 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK] 2024-11-23T19:37:06,282 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
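The logRoller entries above show the WAL reacting to the shrunken pipeline: FSHLog counts 1 replica where at least 2 are expected and requests a roll onto a new file. A hedged sketch of the configuration keys behind that check (defaults may differ by HBase version):

```java
// A sketch of the knobs behind the "Found 1 replicas but expecting no less than
// 2 replicas. Requesting close of WAL" entry above. FSHLog compares the current
// pipeline size against a tolerable replication and asks the roller for a new
// WAL when it drops below that; the values here are illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalLowReplicationConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum pipeline size the WAL will accept before requesting a roll
        // (by default it follows the filesystem's default replication).
        conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
        // How many consecutive low-replication rolls are attempted before the
        // WAL stops rolling for this reason.
        conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
        System.out.println(conf.get("hbase.regionserver.hlog.tolerable.lowreplication"));
    }
}
```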
2024-11-23T19:37:06,282 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK], DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK]) is bad. 2024-11-23T19:37:06,283 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741872_1055 2024-11-23T19:37:06,284 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK] 2024-11-23T19:37:06,286 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:06,286 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK], DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]) is bad. 2024-11-23T19:37:06,286 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741873_1056 2024-11-23T19:37:06,287 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK] 2024-11-23T19:37:06,292 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34927 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T19:37:06,292 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56082 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10]'}, localName='127.0.0.1:37691', datanodeUuid='ccb28add-2541-4416-9a4c-faad8a4c47f0', xmitsInProgress=0}:Exception transferring block BP-1109934133-172.17.0.3-1732390600576:blk_1073741874_1057 to mirror 127.0.0.1:34927 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:06,292 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK], DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]) is bad. 2024-11-23T19:37:06,292 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741874_1057 2024-11-23T19:37:06,292 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56082 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-23T19:37:06,292 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56082 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:37691:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56082 dst: /127.0.0.1:37691 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:06,293 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK] 2024-11-23T19:37:06,294 WARN [IPC Server handler 4 on default port 39041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T19:37:06,294 WARN [IPC Server handler 4 on default port 39041 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T19:37:06,294 WARN [IPC Server handler 4 on default port 39041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T19:37:06,297 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:06,297 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:06,297 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:06,297 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:06,297 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:06,298 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390624249 with entries=13, filesize=11.78 KB; new WAL /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390626273 2024-11-23T19:37:06,298 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38533:38533)] 2024-11-23T19:37:06,298 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390603583 is not closed yet, will try archiving it next time 
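The BlockPlacementPolicyDefault warning above explicitly asks for DEBUG on two loggers before it will explain why replica placement failed. With the Log4j 2 core runtime this harness already configures, one hedged way to raise those two loggers programmatically (the class name and main method here are illustrative):

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public class EnablePlacementDebug {
        public static void main(String[] args) {
            // Both logger names are copied verbatim from the NameNode warning above.
            Configurator.setLevel(
                "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
            Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
        }
    }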
2024-11-23T19:37:06,298 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390624249 is not closed yet, will try archiving it next time 2024-11-23T19:37:06,299 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390620230 to hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/oldWALs/387b213c044a%2C44195%2C1732390602920.1732390620230 2024-11-23T19:37:06,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741855_1038 (size=12066) 2024-11-23T19:37:06,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/8e67143bbb15491ab05f8c438cba0955 2024-11-23T19:37:06,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/8e67143bbb15491ab05f8c438cba0955 as hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/8e67143bbb15491ab05f8c438cba0955 2024-11-23T19:37:06,351 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/8e67143bbb15491ab05f8c438cba0955, entries=1, sequenceid=45, filesize=5.9 K 2024-11-23T19:37:06,352 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 8fb00675b5215e6debc026bc19386eeb in 447ms, sequenceid=45, compaction requested=false 2024-11-23T19:37:06,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8fb00675b5215e6debc026bc19386eeb: 2024-11-23T19:37:06,352 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-23T19:37:06,352 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:37:06,352 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/c41764b6f1f04655be3589a1f18d6351 because midkey is the same as first or last row 2024-11-23T19:37:06,702 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390603583 is not closed yet, will try archiving it next time 2024-11-23T19:37:07,040 
INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:07,107 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@50b67a8f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37691, datanodeUuid=ccb28add-2541-4416-9a4c-faad8a4c47f0, infoPort=38533, infoSecurePort=0, ipcPort=35795, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576):Failed to transfer BP-1109934133-172.17.0.3-1732390600576:blk_1073741840_1023 to 127.0.0.1:37185 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:07,107 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2be82041[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37691, datanodeUuid=ccb28add-2541-4416-9a4c-faad8a4c47f0, infoPort=38533, infoSecurePort=0, ipcPort=35795, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576):Failed to transfer BP-1109934133-172.17.0.3-1732390600576:blk_1073741860_1043 to 127.0.0.1:34927 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
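The "All datanodes [...] are bad. Aborting..." errors that begin here are the DFSClient abandoning pipeline recovery once every datanode in the write pipeline has been excluded. The standard client-side properties sketched below govern whether a replacement datanode is requested and whether the write continues on the survivors; the values shown are illustrative, not a recommendation for this test:

    import org.apache.hadoop.conf.Configuration;

    public class PipelineRecoverySettingsSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // Ask the NameNode for a replacement datanode when a pipeline node fails...
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
            // ...and keep writing on the surviving nodes instead of aborting when no
            // replacement can be found (best-effort), the failure mode logged here.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
            System.out.println(conf.get("dfs.client.block.write.replace-datanode-on-failure.policy"));
        }
    }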
2024-11-23T19:37:07,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44195 {}] regionserver.HRegion(8855): Flush requested on 8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:37:07,334 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8fb00675b5215e6debc026bc19386eeb 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-23T19:37:07,343 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/8f6422d4ce9b402790b563e54e333d93 is 1079, key is tmprow/info:/1732390627331/Put/seqid=0 2024-11-23T19:37:07,345 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:07,345 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK], DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK]) is bad. 2024-11-23T19:37:07,345 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741876_1059 2024-11-23T19:37:07,345 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK] 2024-11-23T19:37:07,347 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T19:37:07,347 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK], DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]) is bad. 2024-11-23T19:37:07,347 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741877_1060 2024-11-23T19:37:07,347 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK] 2024-11-23T19:37:07,349 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:07,349 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK], DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]) is bad. 2024-11-23T19:37:07,349 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741878_1061 2024-11-23T19:37:07,349 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK] 2024-11-23T19:37:07,351 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37185 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T19:37:07,351 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56094 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10]'}, localName='127.0.0.1:37691', datanodeUuid='ccb28add-2541-4416-9a4c-faad8a4c47f0', xmitsInProgress=0}:Exception transferring block BP-1109934133-172.17.0.3-1732390600576:blk_1073741879_1062 to mirror 127.0.0.1:37185 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:07,352 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK], DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]) is bad. 2024-11-23T19:37:07,352 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741879_1062 2024-11-23T19:37:07,352 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56094 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T19:37:07,352 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56094 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:37691:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56094 dst: /127.0.0.1:37691 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:07,352 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK] 2024-11-23T19:37:07,353 WARN [IPC Server handler 2 on default port 39041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T19:37:07,353 WARN [IPC Server handler 2 on default port 39041 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T19:37:07,353 WARN [IPC Server handler 2 on default port 39041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T19:37:07,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741880_1063 (size=6027) 2024-11-23T19:37:07,757 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/8f6422d4ce9b402790b563e54e333d93 2024-11-23T19:37:07,764 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/8f6422d4ce9b402790b563e54e333d93 as hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/8f6422d4ce9b402790b563e54e333d93 2024-11-23T19:37:07,771 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/8f6422d4ce9b402790b563e54e333d93, entries=1, sequenceid=55, filesize=5.9 K 2024-11-23T19:37:07,772 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 8fb00675b5215e6debc026bc19386eeb in 438ms, sequenceid=55, compaction requested=true 2024-11-23T19:37:07,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8fb00675b5215e6debc026bc19386eeb: 2024-11-23T19:37:07,773 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-23T19:37:07,773 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:37:07,773 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/c41764b6f1f04655be3589a1f18d6351 because midkey is the same as first or last row 2024-11-23T19:37:07,773 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8fb00675b5215e6debc026bc19386eeb:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T19:37:07,773 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:37:07,773 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T19:37:07,774 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T19:37:07,774 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.HStore(1541): 8fb00675b5215e6debc026bc19386eeb/info is initiating minor compaction (all files) 2024-11-23T19:37:07,775 INFO [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8fb00675b5215e6debc026bc19386eeb/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb. 
2024-11-23T19:37:07,775 INFO [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/c41764b6f1f04655be3589a1f18d6351, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/8e67143bbb15491ab05f8c438cba0955, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/8f6422d4ce9b402790b563e54e333d93] into tmpdir=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp, totalSize=29.3 K 2024-11-23T19:37:07,775 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] compactions.Compactor(225): Compacting c41764b6f1f04655be3589a1f18d6351, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732390618297 2024-11-23T19:37:07,776 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8e67143bbb15491ab05f8c438cba0955, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732390625903 2024-11-23T19:37:07,776 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8f6422d4ce9b402790b563e54e333d93, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732390627331 2024-11-23T19:37:07,793 INFO [RS:0;387b213c044a:44195-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8fb00675b5215e6debc026bc19386eeb#info#compaction#24 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T19:37:07,794 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/6ceddab6e04a48b9b9e920969f64d14a is 1080, key is row0002/info:/1732390618297/Put/seqid=0 2024-11-23T19:37:07,796 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T19:37:07,796 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK], DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK]) is bad. 2024-11-23T19:37:07,796 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741881_1064 2024-11-23T19:37:07,797 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38929,DS-e13c2ae4-2bec-4993-bd15-5cd7eb7941d3,DISK] 2024-11-23T19:37:07,798 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:07,799 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK], DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]) is bad. 2024-11-23T19:37:07,799 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741882_1065 2024-11-23T19:37:07,799 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34927,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK] 2024-11-23T19:37:07,802 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37185 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T19:37:07,801 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56104 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741883_1066] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10]'}, localName='127.0.0.1:37691', datanodeUuid='ccb28add-2541-4416-9a4c-faad8a4c47f0', xmitsInProgress=0}:Exception transferring block BP-1109934133-172.17.0.3-1732390600576:blk_1073741883_1066 to mirror 127.0.0.1:37185 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:07,802 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK], DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]) is bad. 2024-11-23T19:37:07,802 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741883_1066 2024-11-23T19:37:07,802 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56104 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741883_1066] {}] datanode.BlockReceiver(316): Block 1073741883 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T19:37:07,802 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56104 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741883_1066] {}] datanode.DataXceiver(331): 127.0.0.1:37691:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56104 dst: /127.0.0.1:37691 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:07,802 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK] 2024-11-23T19:37:07,805 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35089 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:07,805 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56106 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741884_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10]'}, localName='127.0.0.1:37691', datanodeUuid='ccb28add-2541-4416-9a4c-faad8a4c47f0', xmitsInProgress=0}:Exception transferring block BP-1109934133-172.17.0.3-1732390600576:blk_1073741884_1067 to mirror 127.0.0.1:35089 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T19:37:07,805 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK], DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]) is bad. 2024-11-23T19:37:07,805 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741884_1067 2024-11-23T19:37:07,805 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56106 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741884_1067] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T19:37:07,805 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:56106 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741884_1067] {}] datanode.DataXceiver(331): 127.0.0.1:37691:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56106 dst: /127.0.0.1:37691 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T19:37:07,805 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK] 2024-11-23T19:37:07,806 WARN [IPC Server handler 2 on default port 39041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-23T19:37:07,806 WARN [IPC Server handler 2 on default port 39041 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-23T19:37:07,806 WARN [IPC Server handler 2 on default port 39041 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-23T19:37:07,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741885_1068 (size=18097) 2024-11-23T19:37:08,190 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:08,219 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/6ceddab6e04a48b9b9e920969f64d14a as hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/6ceddab6e04a48b9b9e920969f64d14a 2024-11-23T19:37:08,229 INFO [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8fb00675b5215e6debc026bc19386eeb/info of 8fb00675b5215e6debc026bc19386eeb into 6ceddab6e04a48b9b9e920969f64d14a(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
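The ConstantSizeRegionSplitPolicy and StoreUtils DEBUG lines that repeat through this stretch reduce to two checks: the store size (17.7 K after the compaction above) exceeds sizeToCheck=16.0 K, but the split is vetoed because the candidate midkey equals the file's first or last row. A simplified paraphrase of that decision, not HBase's actual implementation; the method is illustrative and the row keys are taken from this log:

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    public class SplitCheckSketch {
        /** Mirrors the two conditions the log prints: size check first, then the midkey veto. */
        static boolean shouldSplit(long storeSizeBytes, long sizeToCheckBytes,
                                   byte[] midKey, byte[] firstKey, byte[] lastKey) {
            boolean bigEnough = storeSizeBytes > sizeToCheckBytes;   // "Should split because region size is big enough"
            boolean midKeyUsable = !Arrays.equals(midKey, firstKey)  // "cannot split ... because midkey is the
                    && !Arrays.equals(midKey, lastKey);              //  same as first or last row"
            return bigEnough && midKeyUsable;
        }

        public static void main(String[] args) {
            long sumSize = (long) (17.7 * 1024);   // sumSize=17.7 K from the log
            long sizeToCheck = 16 * 1024;          // sizeToCheck=16.0 K from the log
            byte[] first = "row0002".getBytes(StandardCharsets.UTF_8);
            byte[] mid = "row0002".getBytes(StandardCharsets.UTF_8); // midkey == first row, so the split is vetoed
            byte[] last = "tmprow".getBytes(StandardCharsets.UTF_8);
            System.out.println(shouldSplit(sumSize, sizeToCheck, mid, first, last)); // false
        }
    }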
2024-11-23T19:37:08,229 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8fb00675b5215e6debc026bc19386eeb: 2024-11-23T19:37:08,229 INFO [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb., storeName=8fb00675b5215e6debc026bc19386eeb/info, priority=13, startTime=1732390627773; duration=0sec 2024-11-23T19:37:08,229 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-23T19:37:08,229 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:37:08,230 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/6ceddab6e04a48b9b9e920969f64d14a because midkey is the same as first or last row 2024-11-23T19:37:08,230 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-23T19:37:08,230 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:37:08,230 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/6ceddab6e04a48b9b9e920969f64d14a because midkey is the same as first or last row 2024-11-23T19:37:08,230 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-23T19:37:08,230 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:37:08,230 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/6ceddab6e04a48b9b9e920969f64d14a because midkey is the same as first or last row 2024-11-23T19:37:08,230 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:37:08,230 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8fb00675b5215e6debc026bc19386eeb:info 2024-11-23T19:37:08,299 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:08,299 WARN [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-23T19:37:08,364 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:37:08,369 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:37:08,370 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:37:08,370 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:37:08,370 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T19:37:08,370 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@182fe9c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:37:08,371 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@366bb257{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:37:08,464 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@434810ac{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/java.io.tmpdir/jetty-localhost-33171-hadoop-hdfs-3_4_1-tests_jar-_-any-11194609245395474878/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:37:08,465 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@381443d3{HTTP/1.1, (http/1.1)}{localhost:33171} 2024-11-23T19:37:08,465 INFO [Time-limited test {}] server.Server(415): Started @140995ms 2024-11-23T19:37:08,466 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:37:08,988 WARN [Thread-993 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T19:37:08,995 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd0f191471b3b0c7d with lease ID 0xb6b5517095c97dfa: from storage DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e node DatanodeRegistration(127.0.0.1:44593, datanodeUuid=7e48bb23-6279-4532-ae03-0fdf13f3b82f, infoPort=39611, infoSecurePort=0, ipcPort=46817, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:37:08,996 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd0f191471b3b0c7d with lease ID 0xb6b5517095c97dfa: from storage DS-3a7b9d5c-2092-41df-810b-85a1fbcbb2a7 node DatanodeRegistration(127.0.0.1:44593, datanodeUuid=7e48bb23-6279-4532-ae03-0fdf13f3b82f, infoPort=39611, infoSecurePort=0, ipcPort=46817, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:37:09,041 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:09,108 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2be82041[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37691, datanodeUuid=ccb28add-2541-4416-9a4c-faad8a4c47f0, infoPort=38533, infoSecurePort=0, ipcPort=35795, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576):Failed to transfer BP-1109934133-172.17.0.3-1732390600576:blk_1073741865_1048 to 127.0.0.1:38929 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
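The DirectoryScanner warning just above fires because dfs.datanode.directoryscan.throttle.limit.ms.per.sec was configured above 1000 ms/sec on the restarted datanode, so the default is assumed instead. A hedged sketch of an in-range setting; 500 is an arbitrary example value, not a recommendation for this test:

    import org.apache.hadoop.conf.Configuration;

    public class DirectoryScannerThrottleSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // Per the warning above, values over 1000 ms/sec are rejected and the
            // default is assumed instead; 500 is merely an in-range example.
            conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 500);
            System.out.println(conf.getInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", -1));
        }
    }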
2024-11-23T19:37:09,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741870_1053 (size=6027) 2024-11-23T19:37:10,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741855_1038 (size=12066) 2024-11-23T19:37:10,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741880_1063 (size=6027) 2024-11-23T19:37:10,190 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:10,300 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:11,041 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:12,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741885_1068 (size=18097) 2024-11-23T19:37:12,191 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:12,300 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:12,722 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T19:37:13,042 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:13,280 ERROR [FSHLog-0-hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData-prefix:387b213c044a,35305,1732390602741 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T19:37:13,281 WARN [FSHLog-0-hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData-prefix:387b213c044a,35305,1732390602741 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:13,281 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 387b213c044a%2C35305%2C1732390602741:(num 1732390603059) roll requested 2024-11-23T19:37:13,282 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C35305%2C1732390602741.1732390633281 2024-11-23T19:37:13,293 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:13,293 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:13,293 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:13,294 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:13,294 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:13,294 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/WALs/387b213c044a,35305,1732390602741/387b213c044a%2C35305%2C1732390602741.1732390603059 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/WALs/387b213c044a,35305,1732390602741/387b213c044a%2C35305%2C1732390602741.1732390633281 2024-11-23T19:37:13,295 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:13,295 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:13,295 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/WALs/387b213c044a,35305,1732390602741/387b213c044a%2C35305%2C1732390602741.1732390603059 2024-11-23T19:37:13,295 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39611:39611),(127.0.0.1/127.0.0.1:38533:38533)] 2024-11-23T19:37:13,296 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/WALs/387b213c044a,35305,1732390602741/387b213c044a%2C35305%2C1732390602741.1732390603059 is not closed yet, will try archiving it next time 2024-11-23T19:37:13,296 WARN [IPC Server handler 4 on default port 39041 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/WALs/387b213c044a,35305,1732390602741/387b213c044a%2C35305%2C1732390602741.1732390603059 has not been closed. Lease recovery is in progress. RecoveryId = 1070 for block blk_1073741830_1006 2024-11-23T19:37:13,296 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/WALs/387b213c044a,35305,1732390602741/387b213c044a%2C35305%2C1732390602741.1732390603059 after 1ms 2024-11-23T19:37:14,192 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:14,301 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
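A minimal sketch of the roll-on-failure pattern visible in the entries above: append/sync fails with "All datanodes ... are bad", the roller closes the dead writer best-effort ("close old writer failed", "Failed to write trailer, non-fatal") and opens a new one on a fresh pipeline. The WalWriter type and factory below are hypothetical stand-ins, not HBase's FSHLog classes.

import java.io.IOException;
import java.util.function.Supplier;

interface WalWriter extends AutoCloseable {
    void append(byte[] entry) throws IOException;
    void sync() throws IOException;
    @Override
    void close() throws IOException;
}

final class RollOnFailureWal {
    private final Supplier<WalWriter> writerFactory; // opens a writer on a fresh log file
    private WalWriter current;

    RollOnFailureWal(Supplier<WalWriter> writerFactory) {
        this.writerFactory = writerFactory;
        this.current = writerFactory.get();
    }

    /** Append and sync; if the pipeline is broken, roll to a new writer and retry once. */
    void appendAndSync(byte[] entry) throws IOException {
        try {
            current.append(entry);
            current.sync();
        } catch (IOException pipelineBroken) {
            // The old writer is unusable (all datanodes in its pipeline are gone):
            // close it best-effort, then retry exactly once on a freshly rolled writer.
            try {
                current.close();
            } catch (IOException ignored) {
                // non-fatal, as in the trailer-write warning above
            }
            current = writerFactory.get();
            current.append(entry);
            current.sync();
        }
    }
}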
2024-11-23T19:37:16,192 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:16,301 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:17,299 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/WALs/387b213c044a,35305,1732390602741/387b213c044a%2C35305%2C1732390602741.1732390603059 after 4004ms 2024-11-23T19:37:18,193 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:18,302 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:19,013 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@313d1b7 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1109934133-172.17.0.3-1732390600576:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:37185,null,null]) java.net.ConnectException: Call From 387b213c044a/172.17.0.3 to localhost:35157 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-23T19:37:19,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741833_1019 (size=455) 2024-11-23T19:37:19,268 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390603583 to hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/oldWALs/387b213c044a%2C44195%2C1732390602920.1732390603583 2024-11-23T19:37:19,270 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390624249 to hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/oldWALs/387b213c044a%2C44195%2C1732390602920.1732390624249 2024-11-23T19:37:20,194 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:20,302 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
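The "Failed to recover lease, attempt=0 ... after 1ms" and "attempt=1 ... after 4004ms" entries above come from polling the NameNode until the previous writer's lease on the half-open WAL file is released. A rough sketch of such a polling loop, assuming DistributedFileSystem#recoverLease and arbitrary backoff/timeout values rather than RecoverLeaseFSUtils' actual schedule:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class LeaseRecoverySketch {
    /**
     * Repeatedly asks the NameNode to recover the lease on a file a dead writer
     * left open; recoverLease returns true once the file has been closed.
     * Backoff and timeout here are illustrative, not HBase defaults.
     */
    static boolean recoverLease(DistributedFileSystem dfs, Path walFile, long timeoutMs)
            throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        int attempt = 0;
        while (System.currentTimeMillis() < deadline) {
            if (dfs.recoverLease(walFile)) {
                return true; // lease released and file closed; safe to split/replay the WAL
            }
            attempt++;
            Thread.sleep(Math.min(4_000L, 1_000L * attempt)); // grow the pause between attempts
        }
        return false;
    }
}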
2024-11-23T19:37:20,994 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@325ab664[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44593, datanodeUuid=7e48bb23-6279-4532-ae03-0fdf13f3b82f, infoPort=39611, infoSecurePort=0, ipcPort=46817, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576):Failed to transfer BP-1109934133-172.17.0.3-1732390600576:blk_1073741833_1019 to 127.0.0.1:35089 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:21,989 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C44195%2C1732390602920.1732390641988 2024-11-23T19:37:21,999 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:21,999 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:21,999 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:21,999 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:21,999 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:21,999 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390626273 with entries=14, filesize=12.95 KB; new WAL /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390641988 2024-11-23T19:37:22,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741875_1058 (size=13268) 2024-11-23T19:37:22,001 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38533:38533),(127.0.0.1/127.0.0.1:39611:39611)] 2024-11-23T19:37:22,001 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390626273 is not closed yet, will try archiving it next time 2024-11-23T19:37:22,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44195 {}] regionserver.HRegion(8855): Flush requested on 8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:37:22,008 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8fb00675b5215e6debc026bc19386eeb 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-23T19:37:22,014 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/a1002f8ea9bc4b2da1027c98a71fb2e4 is 1080, key is row0013/info:/1732390642002/Put/seqid=0 2024-11-23T19:37:22,018 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1072 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35089 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:22,018 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:50444 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741888_1072] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10]'}, localName='127.0.0.1:37691', datanodeUuid='ccb28add-2541-4416-9a4c-faad8a4c47f0', xmitsInProgress=0}:Exception transferring block BP-1109934133-172.17.0.3-1732390600576:blk_1073741888_1072 to mirror 127.0.0.1:35089 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:22,018 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741888_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK], DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]) is bad. 
2024-11-23T19:37:22,018 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741888_1072 2024-11-23T19:37:22,018 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:50444 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741888_1072] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-23T19:37:22,018 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:50444 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741888_1072] {}] datanode.DataXceiver(331): 127.0.0.1:37691:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50444 dst: /127.0.0.1:37691 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
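The createBlockOutputStream failure, the "Abandoning ... blk_1073741888_1072" entry, and the "Excluding datanode ..." entry that follows show the client dropping a pipeline node it cannot reach and rebuilding the pipeline without it. A simplified, self-contained sketch of that exclude-on-connect-failure step (plain sockets, hypothetical method name; the real DataStreamer additionally abandons the half-built block and asks the NameNode for new targets):

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.ArrayList;
import java.util.List;

final class PipelineSetupSketch {
    /**
     * Tries to open a connection to each candidate node and drops the ones that
     * refuse, mirroring the "Excluding datanode ..." behaviour in the log above.
     * Returns the nodes that accepted a connection within connectTimeoutMs.
     */
    static List<InetSocketAddress> excludeUnreachable(List<InetSocketAddress> candidates,
                                                      int connectTimeoutMs) {
        List<InetSocketAddress> usable = new ArrayList<>();
        for (InetSocketAddress node : candidates) {
            try (Socket s = new Socket()) {
                s.connect(node, connectTimeoutMs); // ConnectException => node is down or refusing
                usable.add(node);
            } catch (IOException refusedOrTimedOut) {
                // Excluded from the pipeline; a new set of targets would be requested
                // without this node.
            }
        }
        return usable;
    }
}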
2024-11-23T19:37:22,019 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK] 2024-11-23T19:37:22,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741889_1073 (size=9267) 2024-11-23T19:37:22,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741889_1073 (size=9267) 2024-11-23T19:37:22,028 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/a1002f8ea9bc4b2da1027c98a71fb2e4 2024-11-23T19:37:22,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/a1002f8ea9bc4b2da1027c98a71fb2e4 as hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/a1002f8ea9bc4b2da1027c98a71fb2e4 2024-11-23T19:37:22,044 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/a1002f8ea9bc4b2da1027c98a71fb2e4, entries=4, sequenceid=66, filesize=9.0 K 2024-11-23T19:37:22,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7528, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8607 for 8fb00675b5215e6debc026bc19386eeb in 37ms, sequenceid=66, compaction requested=false 2024-11-23T19:37:22,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8fb00675b5215e6debc026bc19386eeb: 2024-11-23T19:37:22,046 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=26.7 K, sizeToCheck=16.0 K 2024-11-23T19:37:22,046 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:37:22,046 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/6ceddab6e04a48b9b9e920969f64d14a because midkey is the same as first or last row 2024-11-23T19:37:22,194 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:22,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44195 {}] regionserver.HRegion(8855): Flush requested on 8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:37:22,236 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8fb00675b5215e6debc026bc19386eeb 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-23T19:37:22,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/903027a6a3b64cf8b600443dc5328315 is 1080, key is row0016/info:/1732390642009/Put/seqid=0 2024-11-23T19:37:22,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741890_1074 (size=13583) 2024-11-23T19:37:22,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741890_1074 (size=13583) 2024-11-23T19:37:22,255 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/903027a6a3b64cf8b600443dc5328315 2024-11-23T19:37:22,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/903027a6a3b64cf8b600443dc5328315 as hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/903027a6a3b64cf8b600443dc5328315 2024-11-23T19:37:22,272 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/903027a6a3b64cf8b600443dc5328315, entries=8, sequenceid=78, filesize=13.3 K 2024-11-23T19:37:22,273 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9682, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 8fb00675b5215e6debc026bc19386eeb in 37ms, sequenceid=78, compaction requested=true 2024-11-23T19:37:22,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8fb00675b5215e6debc026bc19386eeb: 2024-11-23T19:37:22,274 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.0 K, sizeToCheck=16.0 K 2024-11-23T19:37:22,274 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:37:22,274 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/6ceddab6e04a48b9b9e920969f64d14a because midkey is the same as first or last row 2024-11-23T19:37:22,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8fb00675b5215e6debc026bc19386eeb:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T19:37:22,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:37:22,274 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T19:37:22,276 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40947 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T19:37:22,276 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.HStore(1541): 8fb00675b5215e6debc026bc19386eeb/info is initiating minor compaction (all files) 2024-11-23T19:37:22,276 INFO [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8fb00675b5215e6debc026bc19386eeb/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb. 2024-11-23T19:37:22,276 INFO [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/6ceddab6e04a48b9b9e920969f64d14a, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/a1002f8ea9bc4b2da1027c98a71fb2e4, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/903027a6a3b64cf8b600443dc5328315] into tmpdir=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp, totalSize=40.0 K 2024-11-23T19:37:22,277 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6ceddab6e04a48b9b9e920969f64d14a, keycount=12, bloomtype=ROW, size=17.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732390618297 2024-11-23T19:37:22,277 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] compactions.Compactor(225): Compacting a1002f8ea9bc4b2da1027c98a71fb2e4, keycount=4, bloomtype=ROW, size=9.0 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1732390628145 2024-11-23T19:37:22,278 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] compactions.Compactor(225): Compacting 903027a6a3b64cf8b600443dc5328315, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732390642009 2024-11-23T19:37:22,294 INFO [RS:0;387b213c044a:44195-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8fb00675b5215e6debc026bc19386eeb#info#compaction#27 average throughput is 11.29 
MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T19:37:22,295 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/e485537e12d94425a9638aaf5b970dd2 is 1080, key is row0002/info:/1732390618297/Put/seqid=0 2024-11-23T19:37:22,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741891_1075 (size=28989) 2024-11-23T19:37:22,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741891_1075 (size=28989) 2024-11-23T19:37:22,303 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:22,303 INFO [regionserver/387b213c044a:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-23T19:37:22,310 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/.tmp/info/e485537e12d94425a9638aaf5b970dd2 as hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/e485537e12d94425a9638aaf5b970dd2 2024-11-23T19:37:22,317 INFO [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8fb00675b5215e6debc026bc19386eeb/info of 8fb00675b5215e6debc026bc19386eeb into e485537e12d94425a9638aaf5b970dd2(size=28.3 K), total size for store is 28.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
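The "Exploring compaction algorithm has selected 3 files of size 40947" entry above (the 17.7 K, 9.0 K and 13.3 K store files compacted into one 28.3 K file) is driven by a size-ratio test. A simplified version of that test follows; the 1.2 ratio is assumed here for illustration rather than read from any configuration:

import java.util.List;

final class CompactionSelectionSketch {
    /**
     * A candidate set is acceptable when no single file is more than
     * ratio times the combined size of the other files in the set.
     */
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes of the three store files compacted above (sum = 40947 bytes).
        System.out.println(filesInRatio(List.of(18097L, 9267L, 13583L), 1.2)); // prints true
    }
}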
2024-11-23T19:37:22,317 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8fb00675b5215e6debc026bc19386eeb: 2024-11-23T19:37:22,317 INFO [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb., storeName=8fb00675b5215e6debc026bc19386eeb/info, priority=13, startTime=1732390642274; duration=0sec 2024-11-23T19:37:22,317 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-23T19:37:22,317 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:37:22,317 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/e485537e12d94425a9638aaf5b970dd2 because midkey is the same as first or last row 2024-11-23T19:37:22,318 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-23T19:37:22,318 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:37:22,318 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/e485537e12d94425a9638aaf5b970dd2 because midkey is the same as first or last row 2024-11-23T19:37:22,318 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-23T19:37:22,318 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:37:22,318 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/e485537e12d94425a9638aaf5b970dd2 because midkey is the same as first or last row 2024-11-23T19:37:22,318 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:37:22,318 DEBUG [RS:0;387b213c044a:44195-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8fb00675b5215e6debc026bc19386eeb:info 2024-11-23T19:37:22,402 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.1732390626273 to 
hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/oldWALs/387b213c044a%2C44195%2C1732390602920.1732390626273 2024-11-23T19:37:22,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-23T19:37:22,437 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-23T19:37:22,437 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:37:22,437 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:37:22,437 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:37:22,437 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T19:37:22,437 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T19:37:22,437 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=401360406, stopped=false 2024-11-23T19:37:22,438 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=387b213c044a,35305,1732390602741 2024-11-23T19:37:22,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37383-0x1016932c3900002, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T19:37:22,486 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T19:37:22,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T19:37:22,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37383-0x1016932c3900002, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:37:22,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:37:22,486 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:37:22,486 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T19:37:22,486 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-23T19:37:22,486 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:37:22,486 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:37:22,486 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:37:22,486 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:37:22,487 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '387b213c044a,44195,1732390602920' ***** 2024-11-23T19:37:22,487 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T19:37:22,487 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '387b213c044a,37383,1732390604084' ***** 2024-11-23T19:37:22,487 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T19:37:22,487 INFO [RS:0;387b213c044a:44195 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T19:37:22,487 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37383-0x1016932c3900002, quorum=127.0.0.1:56596, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:37:22,487 INFO [RS:0;387b213c044a:44195 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T19:37:22,487 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T19:37:22,487 INFO [RS:0;387b213c044a:44195 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T19:37:22,487 INFO [RS:0;387b213c044a:44195 {}] regionserver.HRegionServer(3091): Received CLOSE for 8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:37:22,487 INFO [RS:1;387b213c044a:37383 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T19:37:22,487 INFO [RS:1;387b213c044a:37383 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T19:37:22,487 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T19:37:22,487 INFO [RS:1;387b213c044a:37383 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T19:37:22,487 INFO [RS:1;387b213c044a:37383 {}] regionserver.HRegionServer(959): stopping server 387b213c044a,37383,1732390604084 2024-11-23T19:37:22,487 INFO [RS:0;387b213c044a:44195 {}] regionserver.HRegionServer(959): stopping server 387b213c044a,44195,1732390602920 2024-11-23T19:37:22,487 INFO [RS:1;387b213c044a:37383 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T19:37:22,487 INFO [RS:0;387b213c044a:44195 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T19:37:22,487 INFO [RS:0;387b213c044a:44195 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;387b213c044a:44195. 2024-11-23T19:37:22,487 INFO [RS:1;387b213c044a:37383 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;387b213c044a:37383. 
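The shutdown above is propagated through ZooKeeper: deleting /hbase/running fires a NodeDeleted event on every watcher, and each server re-arms its watch ("Set watcher on znode that does not yet exist"). A minimal sketch of that watch-and-react pattern against the quorum address shown in the log; the session timeout and latch-based handling are illustrative only:

import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

final class RunningNodeWatcherSketch {
    public static void main(String[] args)
            throws IOException, InterruptedException, KeeperException {
        CountDownLatch deleted = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56596", 30_000, event -> { });
        Watcher runningWatcher = (WatchedEvent event) -> {
            if (event.getType() == Watcher.Event.EventType.NodeDeleted
                    && "/hbase/running".equals(event.getPath())) {
                deleted.countDown(); // an orderly shutdown would start here
            }
        };
        // exists() both checks the znode and (re)arms the one-shot watch,
        // which is why the watch can be set on a znode that does not yet exist.
        zk.exists("/hbase/running", runningWatcher);
        deleted.await();
        zk.close();
    }
}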
2024-11-23T19:37:22,488 DEBUG [RS:0;387b213c044a:44195 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:37:22,488 DEBUG [RS:1;387b213c044a:37383 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:37:22,488 DEBUG [RS:0;387b213c044a:44195 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:37:22,488 DEBUG [RS:1;387b213c044a:37383 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:37:22,488 INFO [RS:0;387b213c044a:44195 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T19:37:22,488 INFO [RS:1;387b213c044a:37383 {}] regionserver.HRegionServer(976): stopping server 387b213c044a,37383,1732390604084; all regions closed. 2024-11-23T19:37:22,488 INFO [RS:0;387b213c044a:44195 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
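The three "Call stack" dumps above come from the connection-close tracing in AsyncConnectionImpl: the first is the master closing its cluster connection from AbstractTestLogRolling.tearDown() via HBaseTestingUtil.shutdownMiniCluster(), and the next two are each region server closing its own connection as HRegionServer.run() winds down. Below is a minimal sketch of the teardown pattern implied by those stacks; shutdownMiniCluster() is taken from the stack trace itself, while the class name, field, and @After hook are illustrative assumptions, not the test's actual source.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class LogRollingTeardownSketch {
  // Assumed to have been created, and the mini cluster started, in a @Before hook.
  protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Stops the HBase mini cluster (master + region servers) and the backing
    // DFS/ZooKeeper processes; the AsyncConnectionImpl "Call stack" DEBUG lines
    // above are printed while each server closes its cluster connection
    // during this shutdown.
    TEST_UTIL.shutdownMiniCluster();
  }
}
```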
2024-11-23T19:37:22,488 INFO [RS:0;387b213c044a:44195 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T19:37:22,488 INFO [RS:0;387b213c044a:44195 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-23T19:37:22,488 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8fb00675b5215e6debc026bc19386eeb, disabling compactions & flushes 2024-11-23T19:37:22,488 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb. 2024-11-23T19:37:22,488 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb. 2024-11-23T19:37:22,488 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb. after waiting 0 ms 2024-11-23T19:37:22,488 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb. 2024-11-23T19:37:22,488 INFO [RS:0;387b213c044a:44195 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-23T19:37:22,488 DEBUG [RS:0;387b213c044a:44195 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 8fb00675b5215e6debc026bc19386eeb=TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb.} 2024-11-23T19:37:22,488 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:22,488 DEBUG [RS:0;387b213c044a:44195 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8fb00675b5215e6debc026bc19386eeb 2024-11-23T19:37:22,488 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:22,488 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T19:37:22,488 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:22,488 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T19:37:22,489 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T19:37:22,489 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T19:37:22,489 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:22,489 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T19:37:22,489 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:22,489 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column 
families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-23T19:37:22,489 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/fe4e89ac20a74010be0e28de46fddcbc, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/df347be6789d447e8ba0cbe7760085e2, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/c41764b6f1f04655be3589a1f18d6351, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/15c28c2e1fef46f69b102de0709bf673, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/8e67143bbb15491ab05f8c438cba0955, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/6ceddab6e04a48b9b9e920969f64d14a, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/8f6422d4ce9b402790b563e54e333d93, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/a1002f8ea9bc4b2da1027c98a71fb2e4, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/903027a6a3b64cf8b600443dc5328315] to archive 2024-11-23T19:37:22,489 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:22,489 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:22,489 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 2024-11-23T19:37:22,489 ERROR [FSHLog-0-hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed-prefix:387b213c044a,44195,1732390602920.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:22,489 WARN [FSHLog-0-hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed-prefix:387b213c044a,44195,1732390602920.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:22,489 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 387b213c044a%2C44195%2C1732390602920.meta:.meta(num 1732390603946) roll requested 2024-11-23T19:37:22,490 WARN [IPC Server handler 0 on default port 39041 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 has not been closed. Lease recovery is in progress. 
RecoveryId = 1076 for block blk_1073741837_1013 2024-11-23T19:37:22,490 INFO [regionserver/387b213c044a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C44195%2C1732390602920.meta.1732390642490.meta 2024-11-23T19:37:22,490 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 after 1ms 2024-11-23T19:37:22,490 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T19:37:22,492 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/fe4e89ac20a74010be0e28de46fddcbc to hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/fe4e89ac20a74010be0e28de46fddcbc 2024-11-23T19:37:22,493 WARN [Thread-1055 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1077 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35089 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:22,493 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:50480 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741892_1077] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10]'}, localName='127.0.0.1:37691', datanodeUuid='ccb28add-2541-4416-9a4c-faad8a4c47f0', xmitsInProgress=0}:Exception transferring block BP-1109934133-172.17.0.3-1732390600576:blk_1073741892_1077 to mirror 127.0.0.1:35089 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:22,494 WARN [Thread-1055 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741892_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37691,DS-00e683c2-c80b-4a3b-8ef7-34874f26bead,DISK], DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]) is bad. 2024-11-23T19:37:22,494 WARN [Thread-1055 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741892_1077 2024-11-23T19:37:22,494 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:50480 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741892_1077] {}] datanode.BlockReceiver(316): Block 1073741892 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-23T19:37:22,494 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1581461959_22 at /127.0.0.1:50480 [Receiving block BP-1109934133-172.17.0.3-1732390600576:blk_1073741892_1077] {}] datanode.DataXceiver(331): 127.0.0.1:37691:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50480 dst: /127.0.0.1:37691 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
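The Close-WAL-Writer-0 messages above show lease recovery on the abandoned WAL file: the trailer write fails because the only remaining datanode in the pipeline is bad, the NameNode reports "Lease recovery is in progress", and RecoverLeaseFSUtils retries (attempt=0 after ~1 ms here, attempt=1 roughly four seconds later further down). Below is a rough sketch of that recover-and-retry loop using the public DistributedFileSystem.recoverLease() call; the retry budget and the 4-second backoff are assumptions for illustration, not HBase's exact policy.

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecoverySketch {
  // Returns true once the NameNode has closed the file and released the lease.
  static boolean recoverWithRetries(DistributedFileSystem dfs, Path walFile)
      throws Exception {
    for (int attempt = 0; attempt < 5; attempt++) {   // retry budget: assumption
      // recoverLease() returns false while lease recovery is still in progress,
      // which matches the attempt=0 / attempt=1 messages in the log.
      if (dfs.recoverLease(walFile)) {
        return true;
      }
      Thread.sleep(4000L);   // roughly the gap observed between attempts in the log
    }
    return false;
  }
}
```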
2024-11-23T19:37:22,494 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/df347be6789d447e8ba0cbe7760085e2 to hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/df347be6789d447e8ba0cbe7760085e2 2024-11-23T19:37:22,494 WARN [Thread-1055 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK] 2024-11-23T19:37:22,496 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/c41764b6f1f04655be3589a1f18d6351 to hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/c41764b6f1f04655be3589a1f18d6351 2024-11-23T19:37:22,498 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/15c28c2e1fef46f69b102de0709bf673 to hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/15c28c2e1fef46f69b102de0709bf673 2024-11-23T19:37:22,498 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:22,498 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:22,499 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:22,499 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:22,499 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:22,499 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390642490.meta 2024-11-23T19:37:22,499 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:22,499 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/8e67143bbb15491ab05f8c438cba0955 to hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/8e67143bbb15491ab05f8c438cba0955 2024-11-23T19:37:22,500 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37185,DS-ececda8f-d48e-479e-ab76-ea68bd433370,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:22,500 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta 2024-11-23T19:37:22,500 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39611:39611),(127.0.0.1/127.0.0.1:38533:38533)] 2024-11-23T19:37:22,500 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta is not closed yet, will try archiving it next time 2024-11-23T19:37:22,500 WARN [IPC Server handler 2 on default port 39041 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1079 for block blk_1073741834_1010 2024-11-23T19:37:22,500 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta after 0ms 2024-11-23T19:37:22,501 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/6ceddab6e04a48b9b9e920969f64d14a to hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/6ceddab6e04a48b9b9e920969f64d14a 2024-11-23T19:37:22,502 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/8f6422d4ce9b402790b563e54e333d93 to hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/8f6422d4ce9b402790b563e54e333d93 2024-11-23T19:37:22,504 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/a1002f8ea9bc4b2da1027c98a71fb2e4 to hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/a1002f8ea9bc4b2da1027c98a71fb2e4 2024-11-23T19:37:22,505 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/903027a6a3b64cf8b600443dc5328315 to hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/info/903027a6a3b64cf8b600443dc5328315 2024-11-23T19:37:22,505 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=387b213c044a:35305 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-23T19:37:22,505 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [fe4e89ac20a74010be0e28de46fddcbc=10347, df347be6789d447e8ba0cbe7760085e2=12506, c41764b6f1f04655be3589a1f18d6351=17994, 15c28c2e1fef46f69b102de0709bf673=6027, 8e67143bbb15491ab05f8c438cba0955=6027, 6ceddab6e04a48b9b9e920969f64d14a=18097, 8f6422d4ce9b402790b563e54e333d93=6027, a1002f8ea9bc4b2da1027c98a71fb2e4=9267, 903027a6a3b64cf8b600443dc5328315=13583] 2024-11-23T19:37:22,506 INFO [regionserver/387b213c044a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-23T19:37:22,507 INFO [regionserver/387b213c044a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-23T19:37:22,512 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8fb00675b5215e6debc026bc19386eeb/recovered.edits/82.seqid, newMaxSeqId=82, maxSeqId=1 2024-11-23T19:37:22,513 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb. 2024-11-23T19:37:22,513 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8fb00675b5215e6debc026bc19386eeb: Waiting for close lock at 1732390642488Running coprocessor pre-close hooks at 1732390642488Disabling compacts and flushes for region at 1732390642488Disabling writes for close at 1732390642488Writing region close event to WAL at 1732390642508 (+20 ms)Running coprocessor post-close hooks at 1732390642512 (+4 ms)Closed at 1732390642512 2024-11-23T19:37:22,513 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb. 2024-11-23T19:37:22,516 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740/.tmp/info/00b055f350224053a2400c61fe93b6ec is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732390604222.8fb00675b5215e6debc026bc19386eeb./info:regioninfo/1732390604605/Put/seqid=0 2024-11-23T19:37:22,518 WARN [Thread-1062 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1080 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:22,518 WARN [Thread-1062 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1109934133-172.17.0.3-1732390600576:blk_1073741894_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK], DatanodeInfoWithStorage[127.0.0.1:44593,DS-def68fd1-6ed6-4d8d-b6d5-6e5e2e98f34e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK]) is bad. 2024-11-23T19:37:22,518 WARN [Thread-1062 {}] hdfs.DataStreamer(1850): Abandoning BP-1109934133-172.17.0.3-1732390600576:blk_1073741894_1080 2024-11-23T19:37:22,518 WARN [Thread-1062 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35089,DS-dd5d42de-ca18-47c0-b42c-61ba0248e282,DISK] 2024-11-23T19:37:22,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741895_1081 (size=7089) 2024-11-23T19:37:22,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741895_1081 (size=7089) 2024-11-23T19:37:22,524 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740/.tmp/info/00b055f350224053a2400c61fe93b6ec 2024-11-23T19:37:22,549 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740/.tmp/ns/1dd4d9d781954319bf66348cb2d3c9f0 is 43, key is default/ns:d/1732390604024/Put/seqid=0 2024-11-23T19:37:22,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741896_1082 (size=5153) 2024-11-23T19:37:22,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741896_1082 (size=5153) 2024-11-23T19:37:22,555 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740/.tmp/ns/1dd4d9d781954319bf66348cb2d3c9f0 2024-11-23T19:37:22,575 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740/.tmp/table/ce763cfbbf1d4c7981862916033d62d8 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732390604615/Put/seqid=0 2024-11-23T19:37:22,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741897_1083 (size=5424) 2024-11-23T19:37:22,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741897_1083 (size=5424) 
2024-11-23T19:37:22,580 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740/.tmp/table/ce763cfbbf1d4c7981862916033d62d8 2024-11-23T19:37:22,586 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740/.tmp/info/00b055f350224053a2400c61fe93b6ec as hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740/info/00b055f350224053a2400c61fe93b6ec 2024-11-23T19:37:22,592 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740/info/00b055f350224053a2400c61fe93b6ec, entries=10, sequenceid=11, filesize=6.9 K 2024-11-23T19:37:22,594 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740/.tmp/ns/1dd4d9d781954319bf66348cb2d3c9f0 as hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740/ns/1dd4d9d781954319bf66348cb2d3c9f0 2024-11-23T19:37:22,600 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740/ns/1dd4d9d781954319bf66348cb2d3c9f0, entries=2, sequenceid=11, filesize=5.0 K 2024-11-23T19:37:22,601 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740/.tmp/table/ce763cfbbf1d4c7981862916033d62d8 as hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740/table/ce763cfbbf1d4c7981862916033d62d8 2024-11-23T19:37:22,608 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740/table/ce763cfbbf1d4c7981862916033d62d8, entries=2, sequenceid=11, filesize=5.3 K 2024-11-23T19:37:22,610 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 121ms, sequenceid=11, compaction requested=false 2024-11-23T19:37:22,617 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-23T19:37:22,618 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor 
org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T19:37:22,618 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T19:37:22,618 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732390642488Running coprocessor pre-close hooks at 1732390642488Disabling compacts and flushes for region at 1732390642488Disabling writes for close at 1732390642489 (+1 ms)Obtaining lock to block concurrent updates at 1732390642489Preparing flush snapshotting stores in 1588230740 at 1732390642489Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732390642489Flushing stores of hbase:meta,,1.1588230740 at 1732390642500 (+11 ms)Flushing 1588230740/info: creating writer at 1732390642500Flushing 1588230740/info: appending metadata at 1732390642516 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732390642516Flushing 1588230740/ns: creating writer at 1732390642530 (+14 ms)Flushing 1588230740/ns: appending metadata at 1732390642548 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1732390642548Flushing 1588230740/table: creating writer at 1732390642561 (+13 ms)Flushing 1588230740/table: appending metadata at 1732390642574 (+13 ms)Flushing 1588230740/table: closing flushed file at 1732390642574Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4029c741: reopening flushed file at 1732390642586 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17ce2c2a: reopening flushed file at 1732390642593 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a478d49: reopening flushed file at 1732390642600 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 121ms, sequenceid=11, compaction requested=false at 1732390642610 (+10 ms)Writing region close event to WAL at 1732390642613 (+3 ms)Running coprocessor post-close hooks at 1732390642618 (+5 ms)Closed at 1732390642618 2024-11-23T19:37:22,618 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T19:37:22,688 INFO [RS:0;387b213c044a:44195 {}] regionserver.HRegionServer(976): stopping server 387b213c044a,44195,1732390602920; all regions closed. 
2024-11-23T19:37:22,689 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:22,689 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:22,689 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:22,690 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:22,690 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:22,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741893_1078 (size=825) 2024-11-23T19:37:22,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741893_1078 (size=825) 2024-11-23T19:37:23,204 INFO [regionserver/387b213c044a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-23T19:37:23,205 INFO [regionserver/387b213c044a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-23T19:37:23,416 INFO [regionserver/387b213c044a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T19:37:23,996 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@633c2ef8[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44593, datanodeUuid=7e48bb23-6279-4532-ae03-0fdf13f3b82f, infoPort=39611, infoSecurePort=0, ipcPort=46817, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576):Failed to transfer BP-1109934133-172.17.0.3-1732390600576:blk_1073741832_1008 to 127.0.0.1:35089 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:24,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741836_1012 (size=76) 2024-11-23T19:37:24,190 INFO [regionserver/387b213c044a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T19:37:24,409 INFO [master/387b213c044a:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-23T19:37:24,409 INFO [master/387b213c044a:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-23T19:37:24,535 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-23T19:37:24,537 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T19:37:24,537 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T19:37:24,996 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@325ab664[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44593, datanodeUuid=7e48bb23-6279-4532-ae03-0fdf13f3b82f, infoPort=39611, infoSecurePort=0, ipcPort=46817, storageInfo=lv=-57;cid=testClusterID;nsid=570888632;c=1732390600576):Failed to transfer BP-1109934133-172.17.0.3-1732390600576:blk_1073741828_1004 to 127.0.0.1:35089 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
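The repeated "Connection refused" transfers to 127.0.0.1:35089 are consistent with the scenario the test name describes: one datanode of the mini DFS cluster has been killed, so block transfers and new write pipelines that include it fail and the DataStreamer excludes it. A hedged sketch of how a test might induce that state is below; getDFSCluster() and the datanode index are assumptions for illustration, not the exact code of AbstractTestLogRolling.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DatanodeDeathSketch {
  static void killOneDatanode(HBaseTestingUtil util) throws Exception {
    MiniDFSCluster dfs = util.getDFSCluster();
    // Stopping a datanode forces later WAL writes to rebuild their pipelines,
    // which is what the DataStreamer "Error Recovery" / "Excluding datanode"
    // warnings and the ConnectException stack traces above reflect.
    dfs.stopDataNode(0); // which datanode to stop is an arbitrary choice here
  }
}
```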
2024-11-23T19:37:25,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741826_1002 (size=42) 2024-11-23T19:37:26,492 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 after 4003ms 2024-11-23T19:37:26,502 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta after 4002ms 2024-11-23T19:37:27,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741827_1003 (size=196) 2024-11-23T19:37:27,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741829_1005 (size=34) 2024-11-23T19:37:27,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741875_1058 (size=13268) 2024-11-23T19:37:27,490 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-23T19:37:27,496 DEBUG [RS:1;387b213c044a:37383 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/oldWALs 2024-11-23T19:37:27,496 INFO [RS:1;387b213c044a:37383 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 387b213c044a%2C37383%2C1732390604084:(num 1732390604327) 2024-11-23T19:37:27,496 DEBUG [RS:1;387b213c044a:37383 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:37:27,496 INFO [RS:1;387b213c044a:37383 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T19:37:27,497 INFO [RS:1;387b213c044a:37383 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T19:37:27,497 INFO [RS:1;387b213c044a:37383 {}] hbase.ChoreService(370): Chore service for: regionserver/387b213c044a:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T19:37:27,498 INFO [RS:1;387b213c044a:37383 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T19:37:27,498 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T19:37:27,498 INFO [RS:1;387b213c044a:37383 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T19:37:27,498 INFO [RS:1;387b213c044a:37383 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T19:37:27,498 INFO [RS:1;387b213c044a:37383 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T19:37:27,498 INFO [RS:1;387b213c044a:37383 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37383 2024-11-23T19:37:27,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:27,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:27,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:27,538 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:27,538 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:27,553 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:27,553 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:27,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T19:37:27,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37383-0x1016932c3900002, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/387b213c044a,37383,1732390604084 2024-11-23T19:37:27,559 INFO [RS:1;387b213c044a:37383 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T19:37:27,572 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [387b213c044a,37383,1732390604084] 2024-11-23T19:37:27,582 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/387b213c044a,37383,1732390604084 already deleted, retry=false 2024-11-23T19:37:27,582 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 387b213c044a,37383,1732390604084 expired; onlineServers=1 2024-11-23T19:37:27,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37383-0x1016932c3900002, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:37:27,672 INFO [RS:1;387b213c044a:37383 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T19:37:27,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37383-0x1016932c3900002, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:37:27,672 INFO [RS:1;387b213c044a:37383 {}] regionserver.HRegionServer(1031): Exiting; stopping=387b213c044a,37383,1732390604084; zookeeper connection closed. 
2024-11-23T19:37:27,673 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7da6fd3f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7da6fd3f 2024-11-23T19:37:27,690 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-23T19:37:27,700 DEBUG [RS:0;387b213c044a:44195 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/oldWALs 2024-11-23T19:37:27,700 INFO [RS:0;387b213c044a:44195 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 387b213c044a%2C44195%2C1732390602920.meta:.meta(num 1732390642490) 2024-11-23T19:37:27,701 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:27,701 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:27,702 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:27,702 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:27,702 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:27,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741887_1071 (size=16308) 2024-11-23T19:37:27,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741887_1071 (size=16308) 2024-11-23T19:37:27,706 DEBUG [RS:0;387b213c044a:44195 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/oldWALs 2024-11-23T19:37:27,706 INFO [RS:0;387b213c044a:44195 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 387b213c044a%2C44195%2C1732390602920:(num 1732390641988) 2024-11-23T19:37:27,706 DEBUG [RS:0;387b213c044a:44195 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:37:27,706 INFO [RS:0;387b213c044a:44195 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T19:37:27,706 INFO [RS:0;387b213c044a:44195 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T19:37:27,706 INFO [RS:0;387b213c044a:44195 {}] hbase.ChoreService(370): Chore service for: regionserver/387b213c044a:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T19:37:27,707 INFO [RS:0;387b213c044a:44195 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T19:37:27,707 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T19:37:27,707 INFO [RS:0;387b213c044a:44195 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44195 2024-11-23T19:37:27,719 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/387b213c044a,44195,1732390602920 2024-11-23T19:37:27,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T19:37:27,719 INFO [RS:0;387b213c044a:44195 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T19:37:27,810 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [387b213c044a,44195,1732390602920] 2024-11-23T19:37:27,835 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/387b213c044a,44195,1732390602920 already deleted, retry=false 2024-11-23T19:37:27,835 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 387b213c044a,44195,1732390602920 expired; onlineServers=0 2024-11-23T19:37:27,835 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '387b213c044a,35305,1732390602741' ***** 2024-11-23T19:37:27,835 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T19:37:27,835 INFO [M:0;387b213c044a:35305 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T19:37:27,835 INFO [M:0;387b213c044a:35305 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T19:37:27,835 DEBUG [M:0;387b213c044a:35305 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T19:37:27,835 DEBUG [M:0;387b213c044a:35305 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T19:37:27,835 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
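The ERROR entries above ("We have waited 5 seconds but the close of async writer doesn't complete") name hbase.wal.fshlog.wait.on.shutdown.seconds as the knob for that shutdown wait. A minimal sketch of raising it on a test configuration; the value 30 is an arbitrary example, not a recommended setting:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

/**
 * Hedged sketch: only shows how the config key named in the ERROR above could be
 * set on a test configuration. The value 30 is an arbitrary example.
 */
public final class WalShutdownWaitSketch {

  public static Configuration withLongerWalShutdownWait() {
    Configuration conf = HBaseConfiguration.create();
    // The log above reports a 5 second wait; give the async writer longer to close.
    conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
    return conf;
  }
}
```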
2024-11-23T19:37:27,835 DEBUG [master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390603278 {}] cleaner.HFileCleaner(306): Exit Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390603278,5,FailOnTimeoutGroup] 2024-11-23T19:37:27,835 DEBUG [master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390603278 {}] cleaner.HFileCleaner(306): Exit Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390603278,5,FailOnTimeoutGroup] 2024-11-23T19:37:27,836 INFO [M:0;387b213c044a:35305 {}] hbase.ChoreService(370): Chore service for: master/387b213c044a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-23T19:37:27,836 INFO [M:0;387b213c044a:35305 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T19:37:27,836 DEBUG [M:0;387b213c044a:35305 {}] master.HMaster(1795): Stopping service threads 2024-11-23T19:37:27,836 INFO [M:0;387b213c044a:35305 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T19:37:27,836 INFO [M:0;387b213c044a:35305 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T19:37:27,836 INFO [M:0;387b213c044a:35305 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T19:37:27,836 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-23T19:37:27,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T19:37:27,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:37:27,845 DEBUG [M:0;387b213c044a:35305 {}] zookeeper.ZKUtil(347): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T19:37:27,845 WARN [M:0;387b213c044a:35305 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T19:37:27,846 INFO [M:0;387b213c044a:35305 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/.lastflushedseqids 2024-11-23T19:37:27,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741898_1084 (size=130) 2024-11-23T19:37:27,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741898_1084 (size=130) 2024-11-23T19:37:27,854 INFO [M:0;387b213c044a:35305 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-23T19:37:27,854 INFO [M:0;387b213c044a:35305 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T19:37:27,854 DEBUG [M:0;387b213c044a:35305 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T19:37:27,854 INFO [M:0;387b213c044a:35305 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:37:27,855 DEBUG [M:0;387b213c044a:35305 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:37:27,855 DEBUG [M:0;387b213c044a:35305 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T19:37:27,855 DEBUG [M:0;387b213c044a:35305 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:37:27,855 INFO [M:0;387b213c044a:35305 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-23T19:37:27,875 DEBUG [M:0;387b213c044a:35305 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1b1c645e9baa4a98bb27c8cd1e7aeb3d is 82, key is hbase:meta,,1/info:regioninfo/1732390603976/Put/seqid=0 2024-11-23T19:37:27,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741899_1085 (size=5672) 2024-11-23T19:37:27,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741899_1085 (size=5672) 2024-11-23T19:37:27,880 INFO [M:0;387b213c044a:35305 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1b1c645e9baa4a98bb27c8cd1e7aeb3d 2024-11-23T19:37:27,899 DEBUG [M:0;387b213c044a:35305 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9671d59205cb44b7be8ad2e51bd205d7 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732390604620/Put/seqid=0 2024-11-23T19:37:27,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741900_1086 (size=6255) 2024-11-23T19:37:27,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741900_1086 (size=6255) 2024-11-23T19:37:27,904 INFO [M:0;387b213c044a:35305 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9671d59205cb44b7be8ad2e51bd205d7 2024-11-23T19:37:27,908 INFO [M:0;387b213c044a:35305 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9671d59205cb44b7be8ad2e51bd205d7 2024-11-23T19:37:27,910 INFO [RS:0;387b213c044a:44195 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T19:37:27,910 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:37:27,910 DEBUG [pool-302-thread-1-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:44195-0x1016932c3900001, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:37:27,910 INFO [RS:0;387b213c044a:44195 {}] regionserver.HRegionServer(1031): Exiting; stopping=387b213c044a,44195,1732390602920; zookeeper connection closed. 2024-11-23T19:37:27,911 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@14c4ee89 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@14c4ee89 2024-11-23T19:37:27,911 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-23T19:37:27,923 DEBUG [M:0;387b213c044a:35305 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aef67b50057e4130a867168c7d2ef5f5 is 69, key is 387b213c044a,37383,1732390604084/rs:state/1732390604162/Put/seqid=0 2024-11-23T19:37:27,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741901_1087 (size=5224) 2024-11-23T19:37:27,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741901_1087 (size=5224) 2024-11-23T19:37:27,928 INFO [M:0;387b213c044a:35305 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aef67b50057e4130a867168c7d2ef5f5 2024-11-23T19:37:27,953 DEBUG [M:0;387b213c044a:35305 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b90f486198104a868945ba7ab436c56b is 52, key is load_balancer_on/state:d/1732390604062/Put/seqid=0 2024-11-23T19:37:27,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741902_1088 (size=5056) 2024-11-23T19:37:27,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741902_1088 (size=5056) 2024-11-23T19:37:27,958 INFO [M:0;387b213c044a:35305 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b90f486198104a868945ba7ab436c56b 2024-11-23T19:37:27,964 DEBUG [M:0;387b213c044a:35305 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1b1c645e9baa4a98bb27c8cd1e7aeb3d as hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1b1c645e9baa4a98bb27c8cd1e7aeb3d 2024-11-23T19:37:27,969 INFO [M:0;387b213c044a:35305 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1b1c645e9baa4a98bb27c8cd1e7aeb3d, entries=8, sequenceid=60, filesize=5.5 K 2024-11-23T19:37:27,971 DEBUG [M:0;387b213c044a:35305 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9671d59205cb44b7be8ad2e51bd205d7 as hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9671d59205cb44b7be8ad2e51bd205d7 2024-11-23T19:37:27,976 INFO [M:0;387b213c044a:35305 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9671d59205cb44b7be8ad2e51bd205d7 2024-11-23T19:37:27,976 INFO [M:0;387b213c044a:35305 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9671d59205cb44b7be8ad2e51bd205d7, entries=6, sequenceid=60, filesize=6.1 K 2024-11-23T19:37:27,977 DEBUG [M:0;387b213c044a:35305 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aef67b50057e4130a867168c7d2ef5f5 as hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/aef67b50057e4130a867168c7d2ef5f5 2024-11-23T19:37:27,983 INFO [M:0;387b213c044a:35305 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/aef67b50057e4130a867168c7d2ef5f5, entries=2, sequenceid=60, filesize=5.1 K 2024-11-23T19:37:27,984 DEBUG [M:0;387b213c044a:35305 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b90f486198104a868945ba7ab436c56b as hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b90f486198104a868945ba7ab436c56b 2024-11-23T19:37:27,989 INFO [M:0;387b213c044a:35305 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b90f486198104a868945ba7ab436c56b, entries=1, sequenceid=60, filesize=4.9 K 2024-11-23T19:37:27,991 INFO [M:0;387b213c044a:35305 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=60, compaction requested=false 2024-11-23T19:37:27,992 INFO [M:0;387b213c044a:35305 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
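The "Committing .../.tmp/<family>/<file> as .../<family>/<file>" lines above show the usual write-to-temporary-then-rename step for flushed store files. A generic sketch of that pattern against the Hadoop FileSystem API, with a hypothetical region-directory layout and payload; this is not HBase's HRegionFileSystem implementation:

```java
import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Hedged sketch of the generic ".tmp then rename" commit pattern visible in the
 * "Committing ... as ..." lines above. Directory layout and payload are
 * illustrative assumptions.
 */
public final class TmpThenRenameSketch {

  static Path writeAndCommit(FileSystem fs, Path regionDir, String family, String fileName,
      byte[] payload) throws IOException {
    Path tmpFile = new Path(regionDir, ".tmp/" + family + "/" + fileName);
    Path finalFile = new Path(regionDir, family + "/" + fileName);
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(payload);                      // flush the data under .tmp first
    }
    fs.mkdirs(finalFile.getParent());          // make sure the family directory exists
    if (!fs.rename(tmpFile, finalFile)) {      // then move it into place in one step
      throw new IOException("Failed to commit " + tmpFile + " as " + finalFile);
    }
    return finalFile;
  }
}
```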
2024-11-23T19:37:27,992 DEBUG [M:0;387b213c044a:35305 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732390647854Disabling compacts and flushes for region at 1732390647854Disabling writes for close at 1732390647855 (+1 ms)Obtaining lock to block concurrent updates at 1732390647855Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732390647855Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1732390647855Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732390647857 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732390647857Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732390647875 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732390647875Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732390647885 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732390647898 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732390647899 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732390647909 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732390647922 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732390647922Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732390647933 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732390647952 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732390647952Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@47cb476c: reopening flushed file at 1732390647963 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6de1f4a8: reopening flushed file at 1732390647970 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f4d2c86: reopening flushed file at 1732390647976 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a743113: reopening flushed file at 1732390647983 (+7 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=60, compaction requested=false at 1732390647991 (+8 ms)Writing region close event to WAL at 1732390647992 (+1 ms)Closed at 1732390647992 2024-11-23T19:37:27,994 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:27,994 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:27,994 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:27,994 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:27,994 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:27,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741825_1001 (size=7) 2024-11-23T19:37:27,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37691 is added to blk_1073741886_1069 (size=1045) 2024-11-23T19:37:27,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741886_1069 (size=1045) 2024-11-23T19:37:28,055 WARN 
[HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T19:37:28,069 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:28,069 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:28,070 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:28,070 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:28,070 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:28,070 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:28,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:28,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:28,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:29,018 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@69cef80 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1109934133-172.17.0.3-1732390600576:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:37185,null,null]) java.net.ConnectException: Call From 387b213c044a/172.17.0.3 to localhost:35157 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-23T19:37:29,318 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/WALs/387b213c044a,35305,1732390602741/387b213c044a%2C35305%2C1732390602741.1732390603059 to hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/oldWALs/387b213c044a%2C35305%2C1732390602741.1732390603059 2024-11-23T19:37:29,387 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/MasterData/oldWALs/387b213c044a%2C35305%2C1732390602741.1732390603059 to hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/oldWALs/387b213c044a%2C35305%2C1732390602741.1732390603059$masterlocalwal$ 2024-11-23T19:37:29,388 INFO [M:0;387b213c044a:35305 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-23T19:37:29,388 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T19:37:29,388 INFO [M:0;387b213c044a:35305 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35305 2024-11-23T19:37:29,388 INFO [M:0;387b213c044a:35305 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T19:37:29,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:37:29,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:37:29,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:37:29,537 INFO [M:0;387b213c044a:35305 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T19:37:29,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35305-0x1016932c3900000, quorum=127.0.0.1:56596, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:37:29,539 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@434810ac{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:37:29,540 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@381443d3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:37:29,540 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:37:29,540 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@366bb257{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:37:29,540 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@182fe9c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/hadoop.log.dir/,STOPPED} 2024-11-23T19:37:29,542 WARN [BP-1109934133-172.17.0.3-1732390600576 heartbeating to localhost/127.0.0.1:39041 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:37:29,542 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-23T19:37:29,542 WARN [BP-1109934133-172.17.0.3-1732390600576 heartbeating to localhost/127.0.0.1:39041 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1109934133-172.17.0.3-1732390600576 (Datanode Uuid 7e48bb23-6279-4532-ae03-0fdf13f3b82f) service to localhost/127.0.0.1:39041 2024-11-23T19:37:29,542 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:37:29,541 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7e3fd6e1 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1109934133-172.17.0.3-1732390600576:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:37185,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:35157 , LocalHost:localPort 387b213c044a/172.17.0.3:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] 
at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-23T19:37:29,542 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7e3fd6e1 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1109934133-172.17.0.3-1732390600576:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:44593,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1109934133-172.17.0.3-1732390600576 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:29,542 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7e3fd6e1 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1109934133-172.17.0.3-1732390600576:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:37185,null,null], DatanodeInfoWithStorage[127.0.0.1:44593,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1109934133-172.17.0.3-1732390600576:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:37185,null,null], DatanodeInfoWithStorage[127.0.0.1:44593,null,null]] 2024-11-23T19:37:29,543 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7e3fd6e1 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1109934133-172.17.0.3-1732390600576:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:37185,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1109934133-172.17.0.3-1732390600576 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:29,543 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7e3fd6e1 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1109934133-172.17.0.3-1732390600576:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:44593,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1109934133-172.17.0.3-1732390600576 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:29,543 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data3/current/BP-1109934133-172.17.0.3-1732390600576 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:37:29,543 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7e3fd6e1 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1109934133-172.17.0.3-1732390600576:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:37185,null,null], DatanodeInfoWithStorage[127.0.0.1:44593,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1109934133-172.17.0.3-1732390600576:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:37185,null,null], DatanodeInfoWithStorage[127.0.0.1:44593,null,null]] 2024-11-23T19:37:29,543 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data4/current/BP-1109934133-172.17.0.3-1732390600576 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:37:29,544 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:37:29,549 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5e08dd81{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:37:29,549 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@178f342a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:37:29,549 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:37:29,549 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5455501c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:37:29,549 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fe58b15{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/hadoop.log.dir/,STOPPED} 2024-11-23T19:37:29,550 WARN [BP-1109934133-172.17.0.3-1732390600576 heartbeating to localhost/127.0.0.1:39041 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:37:29,550 WARN [BP-1109934133-172.17.0.3-1732390600576 heartbeating to localhost/127.0.0.1:39041 {}] datanode.BPServiceActor(925): Ending 
block pool service for: Block pool BP-1109934133-172.17.0.3-1732390600576 (Datanode Uuid ccb28add-2541-4416-9a4c-faad8a4c47f0) service to localhost/127.0.0.1:39041 2024-11-23T19:37:29,550 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-23T19:37:29,551 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:37:29,551 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data9/current/BP-1109934133-172.17.0.3-1732390600576 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:37:29,551 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/cluster_b0e2a98e-ea80-48b7-a059-cf9ff498a647/data/data10/current/BP-1109934133-172.17.0.3-1732390600576 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:37:29,551 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:37:29,557 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@94a50db{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T19:37:29,557 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38184680{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:37:29,557 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:37:29,557 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1aa07d80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:37:29,557 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@475f8022{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/hadoop.log.dir/,STOPPED} 2024-11-23T19:37:29,566 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-23T19:37:29,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-23T19:37:29,611 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 78) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39041 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39041 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:39041 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39041 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f142cbf4db8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f142cbf4db8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:36231 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39041 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36231 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39041 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:39041 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39041 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39041 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39041 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39041 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=532 (was 1009), ProcessCount=11 (was 11), AvailableMemoryMB=2874 (was 2523) - AvailableMemoryMB LEAK? - 2024-11-23T19:37:29,618 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=532, ProcessCount=11, AvailableMemoryMB=2873 2024-11-23T19:37:29,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-23T19:37:29,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/hadoop.log.dir so I do NOT create it in target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c 2024-11-23T19:37:29,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e40835d0-9a45-48dc-d9b2-9a92105d0d55/hadoop.tmp.dir so I do NOT create it in target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c 2024-11-23T19:37:29,618 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091, deleteOnExit=true 2024-11-23T19:37:29,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-23T19:37:29,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/test.cache.data in system properties and HBase conf 2024-11-23T19:37:29,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T19:37:29,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/hadoop.log.dir in system 
properties and HBase conf 2024-11-23T19:37:29,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T19:37:29,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T19:37:29,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-23T19:37:29,619 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-23T19:37:29,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T19:37:29,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T19:37:29,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T19:37:29,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T19:37:29,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T19:37:29,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T19:37:29,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T19:37:29,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T19:37:29,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T19:37:29,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/nfs.dump.dir in system properties and HBase conf 2024-11-23T19:37:29,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/java.io.tmpdir in system properties and HBase conf 2024-11-23T19:37:29,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T19:37:29,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T19:37:29,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T19:37:29,631 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T19:37:30,064 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:37:30,068 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:37:30,069 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:37:30,069 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:37:30,069 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T19:37:30,070 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:37:30,070 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@119a3311{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:37:30,070 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e13d66c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:37:30,164 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1974987b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/java.io.tmpdir/jetty-localhost-45419-hadoop-hdfs-3_4_1-tests_jar-_-any-12948007467991117027/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T19:37:30,165 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@10c583a0{HTTP/1.1, (http/1.1)}{localhost:45419} 2024-11-23T19:37:30,165 INFO [Time-limited test {}] server.Server(415): Started @162696ms 2024-11-23T19:37:30,176 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T19:37:30,434 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:37:30,438 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:37:30,439 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:37:30,439 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:37:30,439 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T19:37:30,440 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@9982f0a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:37:30,440 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a7cb65f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:37:30,498 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:37:30,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:30,532 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@393a832c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/java.io.tmpdir/jetty-localhost-46429-hadoop-hdfs-3_4_1-tests_jar-_-any-16290159119701170060/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:37:30,532 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@301612e4{HTTP/1.1, (http/1.1)}{localhost:46429} 2024-11-23T19:37:30,533 INFO [Time-limited test {}] server.Server(415): Started @163064ms 2024-11-23T19:37:30,534 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:37:30,559 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:37:30,564 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:37:30,565 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:37:30,565 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:37:30,565 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T19:37:30,566 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16ec9c96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:37:30,566 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41d5af00{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:37:30,661 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@57faf252{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/java.io.tmpdir/jetty-localhost-45059-hadoop-hdfs-3_4_1-tests_jar-_-any-8016398234743517672/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:37:30,661 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@50b10539{HTTP/1.1, (http/1.1)}{localhost:45059} 2024-11-23T19:37:30,661 INFO [Time-limited test {}] server.Server(415): Started @163192ms 2024-11-23T19:37:30,662 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:37:31,498 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:31,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:37:31,732 WARN [Thread-1212 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091/data/data1/current/BP-109659227-172.17.0.3-1732390649642/current, will proceed with Du for space computation calculation, 2024-11-23T19:37:31,732 WARN [Thread-1213 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091/data/data2/current/BP-109659227-172.17.0.3-1732390649642/current, will proceed with Du for space computation calculation, 2024-11-23T19:37:31,752 WARN [Thread-1176 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T19:37:31,755 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3c9a16d2f2376d72 with lease ID 0xb6b7783695dfa22b: Processing first storage report for DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9 from datanode DatanodeRegistration(127.0.0.1:42909, datanodeUuid=7b35383a-9ed3-4eee-81fb-2342f84cf290, infoPort=43805, infoSecurePort=0, ipcPort=40917, storageInfo=lv=-57;cid=testClusterID;nsid=1674786499;c=1732390649642) 2024-11-23T19:37:31,755 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3c9a16d2f2376d72 with lease ID 0xb6b7783695dfa22b: from storage DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9 node DatanodeRegistration(127.0.0.1:42909, datanodeUuid=7b35383a-9ed3-4eee-81fb-2342f84cf290, infoPort=43805, infoSecurePort=0, ipcPort=40917, storageInfo=lv=-57;cid=testClusterID;nsid=1674786499;c=1732390649642), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:37:31,755 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3c9a16d2f2376d72 with lease ID 0xb6b7783695dfa22b: Processing first storage report for DS-8f13d30f-ede1-4946-bca4-8111187ae08d from datanode DatanodeRegistration(127.0.0.1:42909, datanodeUuid=7b35383a-9ed3-4eee-81fb-2342f84cf290, infoPort=43805, infoSecurePort=0, ipcPort=40917, storageInfo=lv=-57;cid=testClusterID;nsid=1674786499;c=1732390649642) 2024-11-23T19:37:31,755 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3c9a16d2f2376d72 with lease ID 0xb6b7783695dfa22b: from storage DS-8f13d30f-ede1-4946-bca4-8111187ae08d node DatanodeRegistration(127.0.0.1:42909, datanodeUuid=7b35383a-9ed3-4eee-81fb-2342f84cf290, infoPort=43805, infoSecurePort=0, ipcPort=40917, storageInfo=lv=-57;cid=testClusterID;nsid=1674786499;c=1732390649642), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:37:31,852 WARN [Thread-1223 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091/data/data3/current/BP-109659227-172.17.0.3-1732390649642/current, will proceed with Du for space computation calculation, 2024-11-23T19:37:31,852 WARN [Thread-1224 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091/data/data4/current/BP-109659227-172.17.0.3-1732390649642/current, will proceed with Du for space computation calculation, 2024-11-23T19:37:31,878 WARN [Thread-1199 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T19:37:31,880 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6b4ccda63a2224ed with lease ID 0xb6b7783695dfa22c: Processing first storage report for DS-fc4c7074-4d51-44bd-9635-ef051106be7d from datanode DatanodeRegistration(127.0.0.1:43857, datanodeUuid=ec686ada-82a9-4b0c-b575-124e8c816694, infoPort=39621, infoSecurePort=0, ipcPort=32863, storageInfo=lv=-57;cid=testClusterID;nsid=1674786499;c=1732390649642) 2024-11-23T19:37:31,880 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6b4ccda63a2224ed with lease ID 0xb6b7783695dfa22c: from storage DS-fc4c7074-4d51-44bd-9635-ef051106be7d node DatanodeRegistration(127.0.0.1:43857, datanodeUuid=ec686ada-82a9-4b0c-b575-124e8c816694, infoPort=39621, infoSecurePort=0, ipcPort=32863, storageInfo=lv=-57;cid=testClusterID;nsid=1674786499;c=1732390649642), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:37:31,880 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6b4ccda63a2224ed with lease ID 0xb6b7783695dfa22c: Processing first storage report for DS-e4da55b2-d014-437e-92bc-9ab52d495339 from datanode DatanodeRegistration(127.0.0.1:43857, datanodeUuid=ec686ada-82a9-4b0c-b575-124e8c816694, infoPort=39621, infoSecurePort=0, ipcPort=32863, storageInfo=lv=-57;cid=testClusterID;nsid=1674786499;c=1732390649642) 2024-11-23T19:37:31,880 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6b4ccda63a2224ed with lease ID 0xb6b7783695dfa22c: from storage DS-e4da55b2-d014-437e-92bc-9ab52d495339 node DatanodeRegistration(127.0.0.1:43857, datanodeUuid=ec686ada-82a9-4b0c-b575-124e8c816694, infoPort=39621, infoSecurePort=0, ipcPort=32863, storageInfo=lv=-57;cid=testClusterID;nsid=1674786499;c=1732390649642), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:37:31,898 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c 2024-11-23T19:37:31,901 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091/zookeeper_0, clientPort=64930, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T19:37:31,902 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64930 2024-11-23T19:37:31,903 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:37:31,905 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:37:31,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741825_1001 (size=7) 2024-11-23T19:37:31,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43857 is added to blk_1073741825_1001 (size=7) 2024-11-23T19:37:31,917 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426 with version=8 2024-11-23T19:37:31,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/hbase-staging 2024-11-23T19:37:31,919 INFO [Time-limited test {}] client.ConnectionUtils(128): master/387b213c044a:0 server-side Connection retries=45 2024-11-23T19:37:31,919 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:37:31,919 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T19:37:31,919 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T19:37:31,919 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:37:31,919 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T19:37:31,919 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-23T19:37:31,919 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T19:37:31,920 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43349 2024-11-23T19:37:31,922 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43349 connecting to ZooKeeper ensemble=127.0.0.1:64930 2024-11-23T19:37:31,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:433490x0, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-23T19:37:31,997 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43349-0x101693383a80000 connected 2024-11-23T19:37:32,096 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:37:32,100 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:37:32,103 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:37:32,103 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426, hbase.cluster.distributed=false 2024-11-23T19:37:32,105 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T19:37:32,106 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43349 2024-11-23T19:37:32,106 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43349 2024-11-23T19:37:32,107 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43349 2024-11-23T19:37:32,107 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43349 2024-11-23T19:37:32,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43349 2024-11-23T19:37:32,126 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/387b213c044a:0 server-side Connection retries=45 2024-11-23T19:37:32,127 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:37:32,127 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T19:37:32,127 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T19:37:32,127 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:37:32,127 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T19:37:32,127 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T19:37:32,127 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T19:37:32,127 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38387 2024-11-23T19:37:32,129 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38387 connecting to ZooKeeper ensemble=127.0.0.1:64930 2024-11-23T19:37:32,129 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:37:32,131 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:37:32,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:383870x0, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T19:37:32,145 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38387-0x101693383a80001 connected 2024-11-23T19:37:32,145 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:37:32,146 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T19:37:32,146 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T19:37:32,147 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T19:37:32,148 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T19:37:32,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38387 2024-11-23T19:37:32,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38387 2024-11-23T19:37:32,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38387 2024-11-23T19:37:32,150 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38387 2024-11-23T19:37:32,150 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38387 2024-11-23T19:37:32,165 DEBUG [M:0;387b213c044a:43349 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;387b213c044a:43349 2024-11-23T19:37:32,165 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/387b213c044a,43349,1732390651919 2024-11-23T19:37:32,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:37:32,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:37:32,177 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/387b213c044a,43349,1732390651919 2024-11-23T19:37:32,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T19:37:32,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:37:32,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:37:32,190 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T19:37:32,191 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/387b213c044a,43349,1732390651919 from backup master directory 2024-11-23T19:37:32,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/387b213c044a,43349,1732390651919 2024-11-23T19:37:32,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:37:32,200 WARN [master/387b213c044a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-23T19:37:32,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:37:32,200 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=387b213c044a,43349,1732390651919 2024-11-23T19:37:32,209 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/hbase.id] with ID: bccd8cbc-7400-4d25-bbce-3c05b57f7db3 2024-11-23T19:37:32,209 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/.tmp/hbase.id 2024-11-23T19:37:32,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741826_1002 (size=42) 2024-11-23T19:37:32,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43857 is added to blk_1073741826_1002 (size=42) 2024-11-23T19:37:32,215 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/.tmp/hbase.id]:[hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/hbase.id] 2024-11-23T19:37:32,226 INFO [master/387b213c044a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:37:32,226 INFO [master/387b213c044a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-23T19:37:32,228 INFO [master/387b213c044a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-23T19:37:32,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:37:32,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:37:32,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741827_1003 (size=196) 2024-11-23T19:37:32,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43857 is added to blk_1073741827_1003 (size=196) 2024-11-23T19:37:32,246 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T19:37:32,246 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-23T19:37:32,247 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:37:32,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741828_1004 (size=1189) 2024-11-23T19:37:32,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43857 is added to blk_1073741828_1004 (size=1189) 2024-11-23T19:37:32,254 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store 2024-11-23T19:37:32,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741829_1005 (size=34) 2024-11-23T19:37:32,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43857 is added to blk_1073741829_1005 (size=34) 2024-11-23T19:37:32,260 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:37:32,260 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T19:37:32,260 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:37:32,260 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:37:32,260 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T19:37:32,260 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:37:32,260 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-23T19:37:32,260 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732390652260Disabling compacts and flushes for region at 1732390652260Disabling writes for close at 1732390652260Writing region close event to WAL at 1732390652260Closed at 1732390652260 2024-11-23T19:37:32,261 WARN [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/.initializing 2024-11-23T19:37:32,261 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/WALs/387b213c044a,43349,1732390651919 2024-11-23T19:37:32,264 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C43349%2C1732390651919, suffix=, logDir=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/WALs/387b213c044a,43349,1732390651919, archiveDir=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/oldWALs, maxLogs=10 2024-11-23T19:37:32,264 INFO [master/387b213c044a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C43349%2C1732390651919.1732390652264 2024-11-23T19:37:32,268 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/WALs/387b213c044a,43349,1732390651919/387b213c044a%2C43349%2C1732390651919.1732390652264 2024-11-23T19:37:32,271 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39621:39621),(127.0.0.1/127.0.0.1:43805:43805)] 2024-11-23T19:37:32,272 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T19:37:32,272 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:37:32,272 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:37:32,272 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:37:32,274 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:37:32,276 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T19:37:32,276 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:37:32,276 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:37:32,276 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:37:32,278 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T19:37:32,278 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:37:32,278 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:37:32,278 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:37:32,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T19:37:32,279 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:37:32,280 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:37:32,280 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:37:32,281 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T19:37:32,281 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:37:32,281 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:37:32,281 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:37:32,282 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:37:32,282 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:37:32,283 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:37:32,283 DEBUG [master/387b213c044a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:37:32,284 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T19:37:32,285 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:37:32,286 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T19:37:32,287 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=866210, jitterRate=0.10144412517547607}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T19:37:32,287 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732390652272Initializing all the Stores at 1732390652273 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390652273Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390652274 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390652274Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390652274Cleaning up temporary data from old regions at 1732390652283 (+9 ms)Region opened successfully at 1732390652287 (+4 ms) 2024-11-23T19:37:32,287 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T19:37:32,290 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31eb2705, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=387b213c044a/172.17.0.3:0 2024-11-23T19:37:32,291 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-23T19:37:32,291 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T19:37:32,291 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T19:37:32,291 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T19:37:32,292 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-23T19:37:32,292 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-23T19:37:32,292 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T19:37:32,298 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T19:37:32,299 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T19:37:32,305 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-23T19:37:32,305 INFO [master/387b213c044a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T19:37:32,306 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T19:37:32,316 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-23T19:37:32,316 INFO [master/387b213c044a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T19:37:32,317 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T19:37:32,326 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-23T19:37:32,327 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T19:37:32,337 DEBUG 
[master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T19:37:32,339 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T19:37:32,347 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T19:37:32,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T19:37:32,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T19:37:32,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:37:32,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:37:32,359 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=387b213c044a,43349,1732390651919, sessionid=0x101693383a80000, setting cluster-up flag (Was=false) 2024-11-23T19:37:32,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:37:32,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:37:32,411 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T19:37:32,412 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=387b213c044a,43349,1732390651919 2024-11-23T19:37:32,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:37:32,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:37:32,463 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T19:37:32,464 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=387b213c044a,43349,1732390651919 2024-11-23T19:37:32,466 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-23T19:37:32,467 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-23T19:37:32,467 INFO [master/387b213c044a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-23T19:37:32,468 INFO [master/387b213c044a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-23T19:37:32,468 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 387b213c044a,43349,1732390651919 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T19:37:32,469 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:37:32,469 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:37:32,469 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:37:32,469 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:37:32,470 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/387b213c044a:0, corePoolSize=10, maxPoolSize=10 2024-11-23T19:37:32,470 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:37:32,470 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/387b213c044a:0, corePoolSize=2, maxPoolSize=2 2024-11-23T19:37:32,470 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/387b213c044a:0, corePoolSize=1, 
maxPoolSize=1 2024-11-23T19:37:32,473 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732390682473 2024-11-23T19:37:32,473 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T19:37:32,473 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T19:37:32,473 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T19:37:32,473 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T19:37:32,473 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T19:37:32,473 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T19:37:32,474 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T19:37:32,474 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:37:32,474 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-23T19:37:32,475 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:37:32,475 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T19:37:32,476 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T19:37:32,476 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T19:37:32,476 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T19:37:32,477 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T19:37:32,477 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T19:37:32,477 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390652477,5,FailOnTimeoutGroup] 2024-11-23T19:37:32,478 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390652477,5,FailOnTimeoutGroup] 2024-11-23T19:37:32,478 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T19:37:32,478 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T19:37:32,478 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T19:37:32,478 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
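The hbase:meta descriptor dumped above (families info, ns, rep_barrier and table, each with explicit VERSIONS, ROWCOL bloom filters, ROW_INDEX_V1 encoding, IN_MEMORY => 'true' and an 8 KB block size) is built internally by FSTableDescriptors, but an equivalent family layout can be expressed through the public client API. A minimal sketch; the table name "example" and the single family are illustrative, not taken from this run:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ExampleDescriptorSketch {
        // Builds a descriptor whose single family mirrors the attributes logged for the
        // 'info' family of hbase:meta: 3 versions, ROWCOL bloom, ROW_INDEX_V1 encoding,
        // in-memory, 8 KB blocks. The table name is hypothetical.
        public static TableDescriptor build() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setInMemory(true)
                    .setBlocksize(8192)
                    .build())
                .build();
        }
    }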
2024-11-23T19:37:32,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43857 is added to blk_1073741831_1007 (size=1321) 2024-11-23T19:37:32,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741831_1007 (size=1321) 2024-11-23T19:37:32,483 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-23T19:37:32,483 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426 2024-11-23T19:37:32,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741832_1008 (size=32) 2024-11-23T19:37:32,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43857 is added to blk_1073741832_1008 (size=32) 2024-11-23T19:37:32,493 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:37:32,495 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T19:37:32,496 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T19:37:32,497 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:37:32,497 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:37:32,497 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T19:37:32,499 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T19:37:32,499 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:37:32,499 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:37:32,499 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T19:37:32,500 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T19:37:32,500 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-23T19:37:32,500 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:37:32,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:37:32,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T19:37:32,502 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T19:37:32,502 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:37:32,503 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:37:32,503 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T19:37:32,503 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740 2024-11-23T19:37:32,504 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740 2024-11-23T19:37:32,505 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T19:37:32,505 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T19:37:32,505 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
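The FlushLargeStoresPolicy line above falls back to the memstore flush size divided by the family count (16.0 M here) because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, which is where the log says the policy looks for it. A hedged sketch of supplying it explicitly on a descriptor; the table name, family and the 16 MB value are examples, not values from this run:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushSketch {
        // Sets the per-column-family flush lower bound as a table-descriptor value (bytes).
        public static TableDescriptor build() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound", "16777216")
                .build();
        }
    }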
2024-11-23T19:37:32,506 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T19:37:32,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-23T19:37:32,508 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T19:37:32,509 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=789629, jitterRate=0.004065454006195068}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T19:37:32,509 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732390652494Initializing all the Stores at 1732390652494Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390652494Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390652495 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390652495Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390652495Cleaning up temporary data from old regions at 1732390652505 (+10 ms)Region opened successfully at 1732390652509 (+4 ms) 2024-11-23T19:37:32,509 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T19:37:32,509 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T19:37:32,509 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T19:37:32,509 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T19:37:32,510 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T19:37:32,510 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T19:37:32,510 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732390652509Disabling compacts and flushes for region at 1732390652509Disabling writes for close at 1732390652509Writing region close event to WAL at 1732390652510 (+1 ms)Closed at 1732390652510 2024-11-23T19:37:32,511 DEBUG 
[PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:37:32,511 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-23T19:37:32,511 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T19:37:32,513 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T19:37:32,514 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T19:37:32,552 INFO [RS:0;387b213c044a:38387 {}] regionserver.HRegionServer(746): ClusterId : bccd8cbc-7400-4d25-bbce-3c05b57f7db3 2024-11-23T19:37:32,552 DEBUG [RS:0;387b213c044a:38387 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T19:37:32,559 DEBUG [RS:0;387b213c044a:38387 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T19:37:32,559 DEBUG [RS:0;387b213c044a:38387 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T19:37:32,569 DEBUG [RS:0;387b213c044a:38387 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T19:37:32,570 DEBUG [RS:0;387b213c044a:38387 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c63e1fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=387b213c044a/172.17.0.3:0 2024-11-23T19:37:32,581 DEBUG [RS:0;387b213c044a:38387 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;387b213c044a:38387 2024-11-23T19:37:32,581 INFO [RS:0;387b213c044a:38387 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T19:37:32,581 INFO [RS:0;387b213c044a:38387 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T19:37:32,581 DEBUG [RS:0;387b213c044a:38387 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-23T19:37:32,581 INFO [RS:0;387b213c044a:38387 {}] regionserver.HRegionServer(2659): reportForDuty to master=387b213c044a,43349,1732390651919 with port=38387, startcode=1732390652126 2024-11-23T19:37:32,582 DEBUG [RS:0;387b213c044a:38387 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T19:37:32,583 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54017, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T19:37:32,584 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43349 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 387b213c044a,38387,1732390652126 2024-11-23T19:37:32,584 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43349 {}] master.ServerManager(517): Registering regionserver=387b213c044a,38387,1732390652126 2024-11-23T19:37:32,586 DEBUG [RS:0;387b213c044a:38387 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426 2024-11-23T19:37:32,586 DEBUG [RS:0;387b213c044a:38387 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40593 2024-11-23T19:37:32,586 DEBUG [RS:0;387b213c044a:38387 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T19:37:32,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T19:37:32,598 DEBUG [RS:0;387b213c044a:38387 {}] zookeeper.ZKUtil(111): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/387b213c044a,38387,1732390652126 2024-11-23T19:37:32,598 WARN [RS:0;387b213c044a:38387 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T19:37:32,598 INFO [RS:0;387b213c044a:38387 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:37:32,598 DEBUG [RS:0;387b213c044a:38387 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126 2024-11-23T19:37:32,598 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [387b213c044a,38387,1732390652126] 2024-11-23T19:37:32,601 INFO [RS:0;387b213c044a:38387 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T19:37:32,604 INFO [RS:0;387b213c044a:38387 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T19:37:32,604 INFO [RS:0;387b213c044a:38387 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T19:37:32,604 INFO [RS:0;387b213c044a:38387 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
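The PressureAwareCompactionThroughputController line above reports a 50.00-100.00 MB/second compaction throughput band with a 60000 ms tuning period. A hedged configuration sketch, assuming the stock property names for the pressure-aware controller; the 64/128 MB/second values are illustrative, not taken from this run:

    import org.apache.hadoop.conf.Configuration;

    final class CompactionThroughputSketch {
        // Widens the pressure-aware compaction throughput band (values in bytes per second).
        static void apply(Configuration conf) {
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 64L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 128L * 1024 * 1024);
        }
    }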
2024-11-23T19:37:32,605 INFO [RS:0;387b213c044a:38387 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T19:37:32,605 INFO [RS:0;387b213c044a:38387 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T19:37:32,605 INFO [RS:0;387b213c044a:38387 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T19:37:32,606 DEBUG [RS:0;387b213c044a:38387 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:37:32,606 DEBUG [RS:0;387b213c044a:38387 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:37:32,606 DEBUG [RS:0;387b213c044a:38387 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:37:32,606 DEBUG [RS:0;387b213c044a:38387 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:37:32,606 DEBUG [RS:0;387b213c044a:38387 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:37:32,606 DEBUG [RS:0;387b213c044a:38387 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/387b213c044a:0, corePoolSize=2, maxPoolSize=2 2024-11-23T19:37:32,606 DEBUG [RS:0;387b213c044a:38387 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:37:32,606 DEBUG [RS:0;387b213c044a:38387 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:37:32,606 DEBUG [RS:0;387b213c044a:38387 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:37:32,606 DEBUG [RS:0;387b213c044a:38387 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:37:32,606 DEBUG [RS:0;387b213c044a:38387 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:37:32,606 DEBUG [RS:0;387b213c044a:38387 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:37:32,606 DEBUG [RS:0;387b213c044a:38387 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/387b213c044a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T19:37:32,606 DEBUG [RS:0;387b213c044a:38387 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T19:37:32,610 INFO [RS:0;387b213c044a:38387 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-23T19:37:32,610 INFO [RS:0;387b213c044a:38387 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T19:37:32,610 INFO [RS:0;387b213c044a:38387 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:37:32,610 INFO [RS:0;387b213c044a:38387 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T19:37:32,610 INFO [RS:0;387b213c044a:38387 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T19:37:32,610 INFO [RS:0;387b213c044a:38387 {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,38387,1732390652126-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T19:37:32,625 INFO [RS:0;387b213c044a:38387 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T19:37:32,625 INFO [RS:0;387b213c044a:38387 {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,38387,1732390652126-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:37:32,625 INFO [RS:0;387b213c044a:38387 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:37:32,625 INFO [RS:0;387b213c044a:38387 {}] regionserver.Replication(171): 387b213c044a,38387,1732390652126 started 2024-11-23T19:37:32,639 INFO [RS:0;387b213c044a:38387 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:37:32,640 INFO [RS:0;387b213c044a:38387 {}] regionserver.HRegionServer(1482): Serving as 387b213c044a,38387,1732390652126, RpcServer on 387b213c044a/172.17.0.3:38387, sessionid=0x101693383a80001 2024-11-23T19:37:32,640 DEBUG [RS:0;387b213c044a:38387 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T19:37:32,640 DEBUG [RS:0;387b213c044a:38387 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 387b213c044a,38387,1732390652126 2024-11-23T19:37:32,640 DEBUG [RS:0;387b213c044a:38387 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '387b213c044a,38387,1732390652126' 2024-11-23T19:37:32,640 DEBUG [RS:0;387b213c044a:38387 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T19:37:32,640 DEBUG [RS:0;387b213c044a:38387 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T19:37:32,641 DEBUG [RS:0;387b213c044a:38387 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T19:37:32,641 DEBUG [RS:0;387b213c044a:38387 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T19:37:32,641 DEBUG [RS:0;387b213c044a:38387 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 387b213c044a,38387,1732390652126 2024-11-23T19:37:32,641 DEBUG [RS:0;387b213c044a:38387 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '387b213c044a,38387,1732390652126' 2024-11-23T19:37:32,641 DEBUG [RS:0;387b213c044a:38387 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T19:37:32,641 DEBUG 
[RS:0;387b213c044a:38387 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T19:37:32,641 DEBUG [RS:0;387b213c044a:38387 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T19:37:32,641 INFO [RS:0;387b213c044a:38387 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T19:37:32,641 INFO [RS:0;387b213c044a:38387 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T19:37:32,664 WARN [387b213c044a:43349 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-23T19:37:32,745 INFO [RS:0;387b213c044a:38387 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C38387%2C1732390652126, suffix=, logDir=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126, archiveDir=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/oldWALs, maxLogs=32 2024-11-23T19:37:32,747 INFO [RS:0;387b213c044a:38387 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C38387%2C1732390652126.1732390652746 2024-11-23T19:37:32,755 INFO [RS:0;387b213c044a:38387 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390652746 2024-11-23T19:37:32,761 DEBUG [RS:0;387b213c044a:38387 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39621:39621),(127.0.0.1/127.0.0.1:43805:43805)] 2024-11-23T19:37:32,914 DEBUG [387b213c044a:43349 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-23T19:37:32,915 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=387b213c044a,38387,1732390652126 2024-11-23T19:37:32,918 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 387b213c044a,38387,1732390652126, state=OPENING 2024-11-23T19:37:32,958 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T19:37:33,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:37:33,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:37:33,046 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T19:37:33,047 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:37:33,047 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:37:33,047 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=387b213c044a,38387,1732390652126}] 2024-11-23T19:37:33,204 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T19:37:33,209 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51221, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T19:37:33,214 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T19:37:33,214 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:37:33,217 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C38387%2C1732390652126.meta, suffix=.meta, logDir=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126, archiveDir=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/oldWALs, maxLogs=32 2024-11-23T19:37:33,218 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C38387%2C1732390652126.meta.1732390653218.meta 2024-11-23T19:37:33,224 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.meta.1732390653218.meta 2024-11-23T19:37:33,225 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39621:39621),(127.0.0.1/127.0.0.1:43805:43805)] 2024-11-23T19:37:33,226 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T19:37:33,226 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T19:37:33,227 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T19:37:33,227 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
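Both WALs above are created by FSHLogProvider with blocksize=256 MB, rollsize=128 MB and maxLogs=32, the roll size being the block size times the roll multiplier. A hedged sketch of the corresponding configuration, assuming the standard property names; the values mirror the logged ones:

    import org.apache.hadoop.conf.Configuration;

    final class WalConfigSketch {
        // "filesystem" selects FSHLogProvider; 256 MB blocks, roll at 0.5 * blocksize = 128 MB,
        // keep at most 32 WAL files before forcing flushes.
        static void apply(Configuration conf) {
            conf.set("hbase.wal.provider", "filesystem");
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            conf.setInt("hbase.regionserver.maxlogs", 32);
        }
    }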
2024-11-23T19:37:33,227 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T19:37:33,227 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:37:33,227 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T19:37:33,227 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T19:37:33,228 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T19:37:33,230 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T19:37:33,230 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:37:33,230 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:37:33,230 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T19:37:33,231 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T19:37:33,231 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:37:33,232 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:37:33,232 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T19:37:33,233 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T19:37:33,233 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:37:33,233 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:37:33,233 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T19:37:33,234 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T19:37:33,234 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:37:33,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
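Each CompactionConfiguration dump above reflects a small set of tunables: minFilesToCompact/maxFilesToCompact, the selection ratio and off-peak ratio, and the major compaction period and jitter. A hedged sketch of the matching properties, with values mirroring the logged ones:

    import org.apache.hadoop.conf.Configuration;

    final class CompactionTuningSketch {
        static void apply(Configuration conf) {
            conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // selection ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
            conf.setLong("hbase.hregion.majorcompaction", 604800000L);    // major period, 7 days
            conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);  // major jitter
        }
    }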
2024-11-23T19:37:33,234 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T19:37:33,235 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740 2024-11-23T19:37:33,236 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740 2024-11-23T19:37:33,237 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T19:37:33,237 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T19:37:33,238 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T19:37:33,239 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T19:37:33,240 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=878172, jitterRate=0.1166539341211319}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T19:37:33,240 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T19:37:33,241 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732390653227Writing region info on filesystem at 1732390653227Initializing all the Stores at 1732390653228 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390653228Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390653228Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390653228Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390653228Cleaning up temporary data from old regions at 1732390653237 (+9 ms)Running coprocessor post-open hooks at 1732390653240 (+3 ms)Region opened successfully at 1732390653241 (+1 ms) 2024-11-23T19:37:33,242 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732390653203 2024-11-23T19:37:33,244 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T19:37:33,244 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T19:37:33,245 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=387b213c044a,38387,1732390652126 2024-11-23T19:37:33,246 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 387b213c044a,38387,1732390652126, state=OPEN 2024-11-23T19:37:33,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T19:37:33,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T19:37:33,302 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=387b213c044a,38387,1732390652126 2024-11-23T19:37:33,302 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:37:33,302 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:37:33,308 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T19:37:33,308 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=387b213c044a,38387,1732390652126 in 255 msec 2024-11-23T19:37:33,311 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T19:37:33,311 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 797 msec 2024-11-23T19:37:33,312 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:37:33,312 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-23T19:37:33,314 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T19:37:33,314 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=387b213c044a,38387,1732390652126, seqNum=-1] 2024-11-23T19:37:33,314 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T19:37:33,315 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34447, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T19:37:33,321 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 853 msec 2024-11-23T19:37:33,321 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732390653321, completionTime=-1 2024-11-23T19:37:33,321 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-23T19:37:33,321 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-23T19:37:33,322 INFO [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-23T19:37:33,322 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732390713322 2024-11-23T19:37:33,323 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732390773323 2024-11-23T19:37:33,323 INFO [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-23T19:37:33,323 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,43349,1732390651919-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:37:33,323 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,43349,1732390651919-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:37:33,323 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,43349,1732390651919-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:37:33,323 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-387b213c044a:43349, period=300000, unit=MILLISECONDS is enabled. 
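The final InitMetaProcedure step above creates the {NAME => 'default'} and {NAME => 'hbase'} namespaces. User namespaces are created the same way through the Admin API; a minimal sketch in which the connection bootstrap and the namespace name 'example_ns' are assumptions, not part of this run:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateNamespaceSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // 'example_ns' is hypothetical; 'default' and 'hbase' already exist after InitMetaProcedure.
                admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
            }
        }
    }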
2024-11-23T19:37:33,323 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T19:37:33,323 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-23T19:37:33,325 DEBUG [master/387b213c044a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T19:37:33,326 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.126sec 2024-11-23T19:37:33,326 INFO [master/387b213c044a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T19:37:33,326 INFO [master/387b213c044a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T19:37:33,326 INFO [master/387b213c044a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T19:37:33,326 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-23T19:37:33,326 INFO [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T19:37:33,326 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,43349,1732390651919-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T19:37:33,326 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,43349,1732390651919-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T19:37:33,329 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-23T19:37:33,329 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T19:37:33,329 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,43349,1732390651919-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T19:37:33,353 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25ea055e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:37:33,353 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 387b213c044a,43349,-1 for getting cluster id 2024-11-23T19:37:33,353 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T19:37:33,354 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'bccd8cbc-7400-4d25-bbce-3c05b57f7db3' 2024-11-23T19:37:33,355 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T19:37:33,355 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "bccd8cbc-7400-4d25-bbce-3c05b57f7db3" 2024-11-23T19:37:33,355 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e8da49e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:37:33,355 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [387b213c044a,43349,-1] 2024-11-23T19:37:33,355 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T19:37:33,356 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:37:33,357 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39858, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T19:37:33,358 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@211eec9a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:37:33,358 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T19:37:33,359 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=387b213c044a,38387,1732390652126, seqNum=-1] 2024-11-23T19:37:33,359 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T19:37:33,361 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47476, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T19:37:33,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=387b213c044a,43349,1732390651919 2024-11-23T19:37:33,363 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:37:33,365 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-23T19:37:33,365 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-23T19:37:33,365 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-23T19:37:33,366 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T19:37:33,366 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 387b213c044a,43349,1732390651919 2024-11-23T19:37:33,366 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5f78b918 2024-11-23T19:37:33,367 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T19:37:33,368 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39870, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T19:37:33,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43349 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-23T19:37:33,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43349 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-23T19:37:33,369 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43349 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T19:37:33,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43349 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-23T19:37:33,372 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T19:37:33,372 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:37:33,372 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43349 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-23T19:37:33,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43349 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T19:37:33,373 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T19:37:33,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43857 is added to blk_1073741835_1011 (size=395) 2024-11-23T19:37:33,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741835_1011 (size=395) 2024-11-23T19:37:33,382 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f669b96bafe8323b074c81d49adfb5bb, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426 2024-11-23T19:37:33,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42909 is added to blk_1073741836_1012 (size=78) 2024-11-23T19:37:33,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43857 is added to blk_1073741836_1012 (size=78) 2024-11-23T19:37:33,389 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:37:33,389 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing f669b96bafe8323b074c81d49adfb5bb, disabling compactions & flushes 2024-11-23T19:37:33,389 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb. 2024-11-23T19:37:33,389 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb. 2024-11-23T19:37:33,389 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb. after waiting 0 ms 2024-11-23T19:37:33,389 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb. 2024-11-23T19:37:33,389 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb. 2024-11-23T19:37:33,389 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for f669b96bafe8323b074c81d49adfb5bb: Waiting for close lock at 1732390653389Disabling compacts and flushes for region at 1732390653389Disabling writes for close at 1732390653389Writing region close event to WAL at 1732390653389Closed at 1732390653389 2024-11-23T19:37:33,391 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T19:37:33,391 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732390653391"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732390653391"}]},"ts":"1732390653391"} 2024-11-23T19:37:33,394 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-23T19:37:33,395 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T19:37:33,395 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732390653395"}]},"ts":"1732390653395"} 2024-11-23T19:37:33,398 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-23T19:37:33,398 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=f669b96bafe8323b074c81d49adfb5bb, ASSIGN}] 2024-11-23T19:37:33,400 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=f669b96bafe8323b074c81d49adfb5bb, ASSIGN 2024-11-23T19:37:33,401 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=f669b96bafe8323b074c81d49adfb5bb, ASSIGN; state=OFFLINE, location=387b213c044a,38387,1732390652126; forceNewPlan=false, retain=false 2024-11-23T19:37:33,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:33,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:37:33,552 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f669b96bafe8323b074c81d49adfb5bb, regionState=OPENING, regionLocation=387b213c044a,38387,1732390652126 2024-11-23T19:37:33,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=f669b96bafe8323b074c81d49adfb5bb, ASSIGN because future has completed 2024-11-23T19:37:33,561 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f669b96bafe8323b074c81d49adfb5bb, server=387b213c044a,38387,1732390652126}] 2024-11-23T19:37:33,727 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb. 2024-11-23T19:37:33,727 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => f669b96bafe8323b074c81d49adfb5bb, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb.', STARTKEY => '', ENDKEY => ''} 2024-11-23T19:37:33,727 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart f669b96bafe8323b074c81d49adfb5bb 2024-11-23T19:37:33,727 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:37:33,727 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for f669b96bafe8323b074c81d49adfb5bb 2024-11-23T19:37:33,727 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for f669b96bafe8323b074c81d49adfb5bb 2024-11-23T19:37:33,730 INFO [StoreOpener-f669b96bafe8323b074c81d49adfb5bb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f669b96bafe8323b074c81d49adfb5bb 2024-11-23T19:37:33,732 INFO [StoreOpener-f669b96bafe8323b074c81d49adfb5bb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f669b96bafe8323b074c81d49adfb5bb columnFamilyName info 2024-11-23T19:37:33,732 DEBUG [StoreOpener-f669b96bafe8323b074c81d49adfb5bb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:37:33,733 INFO [StoreOpener-f669b96bafe8323b074c81d49adfb5bb-1 {}] regionserver.HStore(327): Store=f669b96bafe8323b074c81d49adfb5bb/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:37:33,733 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for f669b96bafe8323b074c81d49adfb5bb 2024-11-23T19:37:33,734 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/default/TestLogRolling-testLogRollOnPipelineRestart/f669b96bafe8323b074c81d49adfb5bb 2024-11-23T19:37:33,734 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/default/TestLogRolling-testLogRollOnPipelineRestart/f669b96bafe8323b074c81d49adfb5bb 2024-11-23T19:37:33,735 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for f669b96bafe8323b074c81d49adfb5bb 2024-11-23T19:37:33,735 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for f669b96bafe8323b074c81d49adfb5bb 2024-11-23T19:37:33,738 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for f669b96bafe8323b074c81d49adfb5bb 2024-11-23T19:37:33,742 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/default/TestLogRolling-testLogRollOnPipelineRestart/f669b96bafe8323b074c81d49adfb5bb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T19:37:33,742 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened f669b96bafe8323b074c81d49adfb5bb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=780747, jitterRate=-0.00722917914390564}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T19:37:33,743 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f669b96bafe8323b074c81d49adfb5bb 2024-11-23T19:37:33,745 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for f669b96bafe8323b074c81d49adfb5bb: Running coprocessor pre-open hook at 1732390653728Writing region info on filesystem at 1732390653728Initializing all the Stores at 1732390653729 (+1 
ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390653729Cleaning up temporary data from old regions at 1732390653735 (+6 ms)Running coprocessor post-open hooks at 1732390653743 (+8 ms)Region opened successfully at 1732390653744 (+1 ms) 2024-11-23T19:37:33,746 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb., pid=6, masterSystemTime=1732390653717 2024-11-23T19:37:33,750 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb. 2024-11-23T19:37:33,750 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb. 2024-11-23T19:37:33,751 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f669b96bafe8323b074c81d49adfb5bb, regionState=OPEN, openSeqNum=2, regionLocation=387b213c044a,38387,1732390652126 2024-11-23T19:37:33,754 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f669b96bafe8323b074c81d49adfb5bb, server=387b213c044a,38387,1732390652126 because future has completed 2024-11-23T19:37:33,758 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-23T19:37:33,758 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure f669b96bafe8323b074c81d49adfb5bb, server=387b213c044a,38387,1732390652126 in 194 msec 2024-11-23T19:37:33,761 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-23T19:37:33,761 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=f669b96bafe8323b074c81d49adfb5bb, ASSIGN in 360 msec 2024-11-23T19:37:33,762 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T19:37:33,763 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732390653762"}]},"ts":"1732390653762"} 2024-11-23T19:37:33,764 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-23T19:37:33,765 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; 
CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T19:37:33,767 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 396 msec 2024-11-23T19:37:34,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:34,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:34,534 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T19:37:34,535 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-23T19:37:34,536 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-23T19:37:34,536 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-23T19:37:34,536 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T19:37:34,537 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-23T19:37:35,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:35,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:36,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:36,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:37,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:37,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:38,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:38,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:38,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:38,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:38,248 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:38,248 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:38,251 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:38,252 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:38,254 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:38,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:37:38,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:38,758 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T19:37:38,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:38,778 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:38,778 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:38,778 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:38,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:38,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:38,782 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:38,782 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:38,784 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:37:38,790 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-23T19:37:38,790 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-23T19:37:39,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:37:39,513 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:40,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:40,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:37:41,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:41,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:42,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:37:42,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:43,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43349 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T19:37:43,440 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-23T19:37:43,441 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-23T19:37:43,447 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-23T19:37:43,447 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb. 
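The recurring "Failed invocation ... java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" blocks above all have the same shape for a reason: per the stack trace, RecoverLeaseFSUtils.isFileClosed calls DistributedFileSystem.isFileClosed through java.lang.reflect.Method.invoke, and reflection re-throws any checked exception from the target method wrapped in an InvocationTargetException. That is why the WARN line itself reads "InvocationTargetException: null" and the real failure, an isFileClosed check against a DFSClient that has already been shut down, only appears in the "Caused by" section. Below is a minimal, self-contained sketch of that wrapping behaviour; the class and method names are hypothetical stand-ins, not HBase or HDFS code.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectionWrappingDemo {

    /** Toy stand-in for a filesystem client whose backing client is already closed. */
    static class ClosedClient {
        // Loosely mirrors the shape of an isFileClosed-style check; the point is
        // the checked IOException, not the real HDFS signature.
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        ClosedClient client = new ClosedClient();
        Method m = ClosedClient.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(client, "/some/wal/file");
        } catch (InvocationTargetException e) {
            // The reflective layer reports InvocationTargetException (its own message is null);
            // the underlying failure is only visible via getCause(), as in the log entries above.
            System.out.println("wrapper: " + e);
            System.out.println("cause  : " + e.getCause());
        }
    }
}

Running this prints the bare wrapper first and the "Filesystem closed" IOException only through getCause(), matching the layout of the warnings above.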
2024-11-23T19:37:43,454 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb., hostname=387b213c044a,38387,1732390652126, seqNum=2] 2024-11-23T19:37:43,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:43,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:44,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:44,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:45,459 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390652746 2024-11-23T19:37:45,460 WARN [ResponseProcessor for block BP-109659227-172.17.0.3-1732390649642:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-109659227-172.17.0.3-1732390649642:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:45,460 WARN [ResponseProcessor for block BP-109659227-172.17.0.3-1732390649642:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-109659227-172.17.0.3-1732390649642:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:45,460 WARN [ResponseProcessor for block BP-109659227-172.17.0.3-1732390649642:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-109659227-172.17.0.3-1732390649642:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:45,461 WARN [DataStreamer for file /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.meta.1732390653218.meta block BP-109659227-172.17.0.3-1732390649642:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-109659227-172.17.0.3-1732390649642:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43857,DS-fc4c7074-4d51-44bd-9635-ef051106be7d,DISK], DatanodeInfoWithStorage[127.0.0.1:42909,DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43857,DS-fc4c7074-4d51-44bd-9635-ef051106be7d,DISK]) is bad. 2024-11-23T19:37:45,461 WARN [DataStreamer for file /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390652746 block BP-109659227-172.17.0.3-1732390649642:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-109659227-172.17.0.3-1732390649642:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43857,DS-fc4c7074-4d51-44bd-9635-ef051106be7d,DISK], DatanodeInfoWithStorage[127.0.0.1:42909,DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43857,DS-fc4c7074-4d51-44bd-9635-ef051106be7d,DISK]) is bad. 
2024-11-23T19:37:45,462 WARN [DataStreamer for file /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/WALs/387b213c044a,43349,1732390651919/387b213c044a%2C43349%2C1732390651919.1732390652264 block BP-109659227-172.17.0.3-1732390649642:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-109659227-172.17.0.3-1732390649642:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43857,DS-fc4c7074-4d51-44bd-9635-ef051106be7d,DISK], DatanodeInfoWithStorage[127.0.0.1:42909,DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43857,DS-fc4c7074-4d51-44bd-9635-ef051106be7d,DISK]) is bad. 2024-11-23T19:37:45,462 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1094966324_22 at /127.0.0.1:38414 [Receiving block BP-109659227-172.17.0.3-1732390649642:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43857:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38414 dst: /127.0.0.1:43857 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
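The ResponseProcessor warnings and the DataStreamer "Error Recovery ... datanode 0(...) is bad" entries above record the pipeline break itself: the client threads blocked reading pipeline acks stop getting bytes from the first datanode in the pipeline (127.0.0.1:43857), the read ends early as java.io.EOFException: Unexpected EOF while trying to read response from server, and the streamer then excludes that datanode and begins recovery. A tiny plain-socket sketch of why a peer that closes mid-conversation surfaces as EOFException on the reader side; the server/client split and names here are illustrative only, not HDFS internals.

import java.io.DataInputStream;
import java.io.EOFException;
import java.net.ServerSocket;
import java.net.Socket;

public class PrematureEofDemo {
    public static void main(String[] args) throws Exception {
        try (ServerSocket server = new ServerSocket(0)) {
            // "Datanode" side: accept the connection and close it immediately,
            // i.e. before sending the acknowledgement the reader is waiting for.
            Thread peer = new Thread(() -> {
                try (Socket s = server.accept()) {
                    // no bytes written; the socket closes when this block exits
                } catch (Exception ignored) {
                }
            });
            peer.start();

            // "Client response processor" side: connect and block on a 4-byte ack.
            try (Socket client = new Socket("localhost", server.getLocalPort());
                 DataInputStream in = new DataInputStream(client.getInputStream())) {
                in.readInt(); // stream ends after 0 bytes -> EOFException
            } catch (EOFException e) {
                System.out.println("reader saw premature EOF: " + e);
            }
            peer.join();
        }
    }
}

The same break is visible from the other direction in this log as ClosedChannelException and "Premature EOF from inputStream" in the datanodes' DataXceiver threads.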
2024-11-23T19:37:45,463 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_315575667_22 at /127.0.0.1:38392 [Receiving block BP-109659227-172.17.0.3-1732390649642:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43857:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38392 dst: /127.0.0.1:43857 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:45,463 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1094966324_22 at /127.0.0.1:59610 [Receiving block BP-109659227-172.17.0.3-1732390649642:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42909:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59610 dst: /127.0.0.1:42909 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:45,463 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_315575667_22 at /127.0.0.1:59586 [Receiving block BP-109659227-172.17.0.3-1732390649642:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42909:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59586 dst: /127.0.0.1:42909 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:45,463 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1094966324_22 at /127.0.0.1:38430 [Receiving block BP-109659227-172.17.0.3-1732390649642:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43857:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38430 dst: /127.0.0.1:43857 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:45,464 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1094966324_22 at /127.0.0.1:59614 [Receiving block BP-109659227-172.17.0.3-1732390649642:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42909:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59614 dst: /127.0.0.1:42909 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:45,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:45,520 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:45,527 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@57faf252{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:37:45,527 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@50b10539{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:37:45,527 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:37:45,528 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41d5af00{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:37:45,528 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16ec9c96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/hadoop.log.dir/,STOPPED} 2024-11-23T19:37:45,529 WARN [BP-109659227-172.17.0.3-1732390649642 heartbeating to localhost/127.0.0.1:40593 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:37:45,529 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T19:37:45,529 WARN [BP-109659227-172.17.0.3-1732390649642 heartbeating to localhost/127.0.0.1:40593 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-109659227-172.17.0.3-1732390649642 (Datanode Uuid ec686ada-82a9-4b0c-b575-124e8c816694) service to localhost/127.0.0.1:40593 2024-11-23T19:37:45,529 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:37:45,529 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091/data/data3/current/BP-109659227-172.17.0.3-1732390649642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:37:45,530 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091/data/data4/current/BP-109659227-172.17.0.3-1732390649642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:37:45,530 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:37:45,538 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:37:45,541 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:37:45,542 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:37:45,542 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:37:45,543 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T19:37:45,543 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b3cf8f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:37:45,544 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@717499fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:37:45,638 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5bc55163{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/java.io.tmpdir/jetty-localhost-42735-hadoop-hdfs-3_4_1-tests_jar-_-any-15977717671646752847/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:37:45,639 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@571de0fd{HTTP/1.1, 
(http/1.1)}{localhost:42735} 2024-11-23T19:37:45,639 INFO [Time-limited test {}] server.Server(415): Started @178169ms 2024-11-23T19:37:45,640 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:37:45,657 WARN [ResponseProcessor for block BP-109659227-172.17.0.3-1732390649642:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-109659227-172.17.0.3-1732390649642:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:45,657 WARN [ResponseProcessor for block BP-109659227-172.17.0.3-1732390649642:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-109659227-172.17.0.3-1732390649642:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:45,657 WARN [ResponseProcessor for block BP-109659227-172.17.0.3-1732390649642:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-109659227-172.17.0.3-1732390649642:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:45,658 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1094966324_22 at /127.0.0.1:51796 [Receiving block BP-109659227-172.17.0.3-1732390649642:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42909:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51796 dst: /127.0.0.1:42909 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:45,658 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1094966324_22 at /127.0.0.1:51798 [Receiving block BP-109659227-172.17.0.3-1732390649642:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42909:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51798 dst: /127.0.0.1:42909 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:45,658 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_315575667_22 at /127.0.0.1:51810 [Receiving block BP-109659227-172.17.0.3-1732390649642:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42909:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51810 dst: /127.0.0.1:42909 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
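The ResponseProcessor and DataXceiver failures above all have the same shape: a peer disappears mid-exchange, and a blocking read of a length-prefixed ack surfaces as an EOFException or ClosedChannelException. A minimal Java sketch of that read pattern, assuming a plain 4-byte length prefix instead of the protobuf varint the real pipeline ack uses (so this is an analogy, not the Hadoop implementation):

    import java.io.DataInputStream;
    import java.io.EOFException;
    import java.io.IOException;
    import java.io.InputStream;

    // Reads a length-prefixed message and turns a truncated or closed stream
    // into an EOFException, so the caller can start pipeline recovery instead
    // of blocking forever. Illustrative only.
    public final class AckReader {
      private AckReader() {}

      public static byte[] readLengthPrefixed(InputStream in) throws IOException {
        DataInputStream din = new DataInputStream(in);
        int len;
        try {
          len = din.readInt();          // EOF here: peer closed before sending the prefix
        } catch (EOFException e) {
          throw new EOFException("Unexpected EOF while trying to read response from server");
        }
        if (len < 0) {
          throw new IOException("Negative length prefix: " + len);
        }
        byte[] payload = new byte[len];
        din.readFully(payload);         // EOF here: message truncated mid-body
        return payload;
      }
    }

Converting the short read into EOFException is what lets the response processor treat the event as a pipeline failure rather than an indefinite stall.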
2024-11-23T19:37:45,662 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@393a832c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:37:45,662 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@301612e4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:37:45,662 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:37:45,662 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a7cb65f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:37:45,662 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@9982f0a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/hadoop.log.dir/,STOPPED} 2024-11-23T19:37:45,663 WARN [BP-109659227-172.17.0.3-1732390649642 heartbeating to localhost/127.0.0.1:40593 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:37:45,663 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-23T19:37:45,663 WARN [BP-109659227-172.17.0.3-1732390649642 heartbeating to localhost/127.0.0.1:40593 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-109659227-172.17.0.3-1732390649642 (Datanode Uuid 7b35383a-9ed3-4eee-81fb-2342f84cf290) service to localhost/127.0.0.1:40593 2024-11-23T19:37:45,663 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:37:45,664 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091/data/data1/current/BP-109659227-172.17.0.3-1732390649642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:37:45,664 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091/data/data2/current/BP-109659227-172.17.0.3-1732390649642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:37:45,664 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:37:45,675 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:37:45,678 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:37:45,679 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:37:45,679 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:37:45,679 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T19:37:45,680 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@592e51be{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:37:45,681 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3146549d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:37:45,773 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4307cd3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/java.io.tmpdir/jetty-localhost-35699-hadoop-hdfs-3_4_1-tests_jar-_-any-1622269202325431110/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:37:45,774 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17312068{HTTP/1.1, (http/1.1)}{localhost:35699} 2024-11-23T19:37:45,774 INFO [Time-limited test {}] server.Server(415): Started @178304ms 2024-11-23T19:37:45,775 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:37:46,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:46,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:46,717 WARN [Thread-1346 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T19:37:46,720 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc4657675775e6e36 with lease ID 0xb6b7783695dfa22d: from storage DS-fc4c7074-4d51-44bd-9635-ef051106be7d node DatanodeRegistration(127.0.0.1:45107, datanodeUuid=ec686ada-82a9-4b0c-b575-124e8c816694, infoPort=45029, infoSecurePort=0, ipcPort=41337, storageInfo=lv=-57;cid=testClusterID;nsid=1674786499;c=1732390649642), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:37:46,720 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc4657675775e6e36 with lease ID 0xb6b7783695dfa22d: from storage DS-e4da55b2-d014-437e-92bc-9ab52d495339 node DatanodeRegistration(127.0.0.1:45107, datanodeUuid=ec686ada-82a9-4b0c-b575-124e8c816694, infoPort=45029, infoSecurePort=0, ipcPort=41337, storageInfo=lv=-57;cid=testClusterID;nsid=1674786499;c=1732390649642), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:37:46,793 WARN [Thread-1366 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T19:37:46,796 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x765ad1d4b309d282 with lease ID 0xb6b7783695dfa22e: from storage DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9 node DatanodeRegistration(127.0.0.1:37333, datanodeUuid=7b35383a-9ed3-4eee-81fb-2342f84cf290, infoPort=39515, infoSecurePort=0, ipcPort=40991, storageInfo=lv=-57;cid=testClusterID;nsid=1674786499;c=1732390649642), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:37:46,796 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x765ad1d4b309d282 with lease ID 0xb6b7783695dfa22e: from storage DS-8f13d30f-ede1-4946-bca4-8111187ae08d node DatanodeRegistration(127.0.0.1:37333, datanodeUuid=7b35383a-9ed3-4eee-81fb-2342f84cf290, infoPort=39515, infoSecurePort=0, ipcPort=40991, storageInfo=lv=-57;cid=testClusterID;nsid=1674786499;c=1732390649642), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:37:46,797 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-23T19:37:46,799 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-23T19:37:46,800 ERROR [FSHLog-0-hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426-prefix:387b213c044a,38387,1732390652126 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42909,DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
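The appendAndSync error above ("All datanodes ... are bad. Aborting...") is immediately followed below by a roll request from the log roller. A hypothetical sketch of that roll-on-append-failure pattern, using a local file in place of an HDFS output stream and invented names (RollingLogWriter is not an HBase class):

    import java.io.IOException;
    import java.io.OutputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;

    // When an append fails because the write pipeline is gone, abandon the
    // current writer, open a fresh log file, and rethrow so the caller can
    // decide how to handle the failed entry. Illustrative only.
    class RollingLogWriter {
      private final Path dir;
      private OutputStream current;
      private int generation;

      RollingLogWriter(Path dir) throws IOException {
        this.dir = dir;
        roll();
      }

      synchronized void append(byte[] entry) throws IOException {
        try {
          current.write(entry);
          current.flush();
        } catch (IOException e) {
          roll();      // the old writer is considered bad
          throw e;
        }
      }

      private void roll() throws IOException {
        if (current != null) {
          try { current.close(); } catch (IOException ignored) { /* non-fatal */ }
        }
        generation++;
        current = Files.newOutputStream(dir.resolve("wal." + generation),
            StandardOpenOption.CREATE, StandardOpenOption.APPEND);
      }
    }

The sketch only shows the writer swap; it does not attempt to model what happens to outstanding entries after the roll.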
2024-11-23T19:37:46,800 WARN [FSHLog-0-hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426-prefix:387b213c044a,38387,1732390652126 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42909,DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:46,801 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 387b213c044a%2C38387%2C1732390652126:(num 1732390652746) roll requested 2024-11-23T19:37:46,801 INFO [regionserver/387b213c044a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C38387%2C1732390652126.1732390666801 2024-11-23T19:37:46,806 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390652746 newFile=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390666801 2024-11-23T19:37:46,806 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:46,806 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:46,807 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:46,807 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:46,807 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:46,807 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390652746 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390666801 2024-11-23T19:37:46,807 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42909,DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
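The "Failed to write trailer, non-fatal, continuing..." warning reflects a close path that tries to finish the file cleanly but tolerates a dead pipeline. A small illustrative sketch of that best-effort close (plain OutputStream and System.err logging, not the AbstractProtobufLogWriter code):

    import java.io.IOException;
    import java.io.OutputStream;
    import java.nio.charset.StandardCharsets;

    // On close, try to append a trailer record; if the underlying stream is
    // already dead, log and continue so shutdown is not blocked. Illustrative only.
    final class TrailerWritingCloser {
      private static final byte[] TRAILER = "LOG-TRAILER".getBytes(StandardCharsets.UTF_8);

      static void close(OutputStream out) {
        try {
          out.write(TRAILER);   // best effort: may fail if all replicas are gone
          out.flush();
        } catch (IOException e) {
          System.err.println("Failed to write trailer, non-fatal, continuing... " + e);
        } finally {
          try {
            out.close();
          } catch (IOException e) {
            System.err.println("close old writer failed: " + e);
          }
        }
      }
    }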
2024-11-23T19:37:46,807 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42909,DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:46,808 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390652746 2024-11-23T19:37:46,808 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39515:39515),(127.0.0.1/127.0.0.1:45029:45029)] 2024-11-23T19:37:46,808 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390652746 is not closed yet, will try archiving it next time 2024-11-23T19:37:46,808 WARN [IPC Server handler 0 on default port 40593 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390652746 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-23T19:37:46,808 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390652746 after 0ms 2024-11-23T19:37:47,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:47,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:37:48,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:48,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:48,721 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-23T19:37:48,811 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-23T19:37:49,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:49,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:50,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:50,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:50,809 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390652746 after 4001ms 2024-11-23T19:37:50,813 WARN [ResponseProcessor for block BP-109659227-172.17.0.3-1732390649642:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-109659227-172.17.0.3-1732390649642:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:50,814 WARN [DataStreamer for file /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390666801 block BP-109659227-172.17.0.3-1732390649642:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-109659227-172.17.0.3-1732390649642:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37333,DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9,DISK], DatanodeInfoWithStorage[127.0.0.1:45107,DS-fc4c7074-4d51-44bd-9635-ef051106be7d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37333,DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9,DISK]) is bad. 
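The RecoverLeaseFSUtils lines above show the retry shape directly: attempt=0 fails after 0ms while lease recovery is still in progress on the NameNode, and attempt=1 succeeds after 4001ms. A minimal sketch of such a loop against the public DistributedFileSystem API (the pause length and helper name are assumptions, and this is not the HBase utility itself):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Repeatedly ask the NameNode to recover the lease on a file, polling
    // isFileClosed between attempts, because recovery completes asynchronously.
    final class LeaseRecoveryExample {
      private LeaseRecoveryExample() {}

      static boolean recoverLeaseWithRetries(DistributedFileSystem dfs, Path file,
          int maxAttempts, long pauseMs) throws IOException, InterruptedException {
        long start = System.currentTimeMillis();
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
          boolean recovered = dfs.recoverLease(file) || dfs.isFileClosed(file);
          long elapsed = System.currentTimeMillis() - start;
          if (recovered) {
            System.out.println("Recovered lease, attempt=" + attempt
                + " on file=" + file + " after " + elapsed + "ms");
            return true;
          }
          System.out.println("Failed to recover lease, attempt=" + attempt
              + " on file=" + file + " after " + elapsed + "ms");
          Thread.sleep(pauseMs);   // give the NameNode time to finish block recovery
        }
        return false;
      }
    }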
2024-11-23T19:37:50,814 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1094966324_22 at /127.0.0.1:34912 [Receiving block BP-109659227-172.17.0.3-1732390649642:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45107:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34912 dst: /127.0.0.1:45107 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:50,814 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1094966324_22 at /127.0.0.1:35902 [Receiving block BP-109659227-172.17.0.3-1732390649642:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:37333:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35902 dst: /127.0.0.1:37333 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:50,866 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4307cd3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:37:50,866 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17312068{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:37:50,866 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:37:50,866 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3146549d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:37:50,866 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@592e51be{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/hadoop.log.dir/,STOPPED} 2024-11-23T19:37:50,868 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
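Several of the shutdown messages in the surrounding entries ("Command processor encountered interrupt and exit", "Thread Interrupted waiting to refresh disk information: sleep interrupted") come from background workers that use an interrupt as their stop signal. A generic sketch of that pattern:

    // A background worker that sleeps between iterations, treats an interrupt
    // as a shutdown request, restores the interrupt flag, and leaves its loop.
    // Illustrative only; not the Hadoop worker classes named in the log.
    class PeriodicWorker implements Runnable {
      private final long intervalMs;

      PeriodicWorker(long intervalMs) {
        this.intervalMs = intervalMs;
      }

      @Override
      public void run() {
        while (!Thread.currentThread().isInterrupted()) {
          doOneRound();
          try {
            Thread.sleep(intervalMs);
          } catch (InterruptedException e) {
            System.err.println("Thread interrupted while waiting; ending worker: " + e.getMessage());
            Thread.currentThread().interrupt();   // preserve the interrupt status
            return;                               // exit the service loop
          }
        }
      }

      private void doOneRound() {
        // e.g. refresh cached disk-usage information
      }
    }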
2024-11-23T19:37:50,868 WARN [BP-109659227-172.17.0.3-1732390649642 heartbeating to localhost/127.0.0.1:40593 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:37:50,868 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:37:50,868 WARN [BP-109659227-172.17.0.3-1732390649642 heartbeating to localhost/127.0.0.1:40593 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-109659227-172.17.0.3-1732390649642 (Datanode Uuid 7b35383a-9ed3-4eee-81fb-2342f84cf290) service to localhost/127.0.0.1:40593 2024-11-23T19:37:50,868 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091/data/data1/current/BP-109659227-172.17.0.3-1732390649642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:37:50,868 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091/data/data2/current/BP-109659227-172.17.0.3-1732390649642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:37:50,869 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:37:50,877 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:37:50,880 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:37:50,881 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:37:50,881 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:37:50,881 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T19:37:50,881 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@730e0fff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:37:50,882 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@518d8d55{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:37:50,974 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@38ca15c7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/java.io.tmpdir/jetty-localhost-33115-hadoop-hdfs-3_4_1-tests_jar-_-any-6242603206913152262/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:37:50,974 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6573e60c{HTTP/1.1, (http/1.1)}{localhost:33115} 2024-11-23T19:37:50,974 INFO [Time-limited test {}] server.Server(415): Started @183505ms 2024-11-23T19:37:50,976 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:37:51,001 WARN [ResponseProcessor for block BP-109659227-172.17.0.3-1732390649642:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-109659227-172.17.0.3-1732390649642:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:51,001 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1094966324_22 at /127.0.0.1:34922 [Receiving block BP-109659227-172.17.0.3-1732390649642:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45107:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34922 dst: /127.0.0.1:45107 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:51,070 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5bc55163{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:37:51,070 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@571de0fd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:37:51,070 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:37:51,070 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@717499fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:37:51,071 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b3cf8f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/hadoop.log.dir/,STOPPED} 2024-11-23T19:37:51,072 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T19:37:51,072 WARN [BP-109659227-172.17.0.3-1732390649642 heartbeating to localhost/127.0.0.1:40593 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:37:51,072 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:37:51,072 WARN [BP-109659227-172.17.0.3-1732390649642 heartbeating to localhost/127.0.0.1:40593 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-109659227-172.17.0.3-1732390649642 (Datanode Uuid ec686ada-82a9-4b0c-b575-124e8c816694) service to localhost/127.0.0.1:40593 2024-11-23T19:37:51,072 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091/data/data3/current/BP-109659227-172.17.0.3-1732390649642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:37:51,072 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091/data/data4/current/BP-109659227-172.17.0.3-1732390649642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:37:51,073 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:37:51,081 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:37:51,083 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:37:51,084 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:37:51,084 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:37:51,084 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T19:37:51,085 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@51065df5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:37:51,085 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bc7279c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:37:51,179 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2dd41fe4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/java.io.tmpdir/jetty-localhost-44475-hadoop-hdfs-3_4_1-tests_jar-_-any-8812870161062158061/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:37:51,179 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3420abff{HTTP/1.1, (http/1.1)}{localhost:44475} 2024-11-23T19:37:51,179 INFO [Time-limited test {}] server.Server(415): Started @183710ms 2024-11-23T19:37:51,181 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:37:51,460 WARN [Thread-1420 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T19:37:51,462 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xca67c4000fde223d with lease ID 0xb6b7783695dfa22f: from storage DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9 node DatanodeRegistration(127.0.0.1:38217, datanodeUuid=7b35383a-9ed3-4eee-81fb-2342f84cf290, infoPort=39251, infoSecurePort=0, ipcPort=38939, storageInfo=lv=-57;cid=testClusterID;nsid=1674786499;c=1732390649642), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:37:51,462 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xca67c4000fde223d with lease ID 0xb6b7783695dfa22f: from storage DS-8f13d30f-ede1-4946-bca4-8111187ae08d node DatanodeRegistration(127.0.0.1:38217, datanodeUuid=7b35383a-9ed3-4eee-81fb-2342f84cf290, infoPort=39251, infoSecurePort=0, ipcPort=38939, storageInfo=lv=-57;cid=testClusterID;nsid=1674786499;c=1732390649642), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:37:51,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:37:51,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:51,675 WARN [Thread-1440 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T19:37:51,678 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1964ad989cd54b12 with lease ID 0xb6b7783695dfa230: from storage DS-fc4c7074-4d51-44bd-9635-ef051106be7d node DatanodeRegistration(127.0.0.1:32927, datanodeUuid=ec686ada-82a9-4b0c-b575-124e8c816694, infoPort=42737, infoSecurePort=0, ipcPort=42675, storageInfo=lv=-57;cid=testClusterID;nsid=1674786499;c=1732390649642), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:37:51,678 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1964ad989cd54b12 with lease ID 0xb6b7783695dfa230: from storage DS-e4da55b2-d014-437e-92bc-9ab52d495339 node DatanodeRegistration(127.0.0.1:32927, datanodeUuid=ec686ada-82a9-4b0c-b575-124e8c816694, infoPort=42737, infoSecurePort=0, ipcPort=42675, storageInfo=lv=-57;cid=testClusterID;nsid=1674786499;c=1732390649642), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-23T19:37:52,199 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-23T19:37:52,204 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-23T19:37:52,207 ERROR [FSHLog-0-hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426-prefix:387b213c044a,38387,1732390652126 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45107,DS-fc4c7074-4d51-44bd-9635-ef051106be7d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:52,207 WARN [FSHLog-0-hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426-prefix:387b213c044a,38387,1732390652126 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45107,DS-fc4c7074-4d51-44bd-9635-ef051106be7d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
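The entries above record the test bouncing its HDFS datanodes under a live WAL ("Data Nodes restarted"), after which the next append fails because the old write pipeline is gone. A minimal sketch of how such a restart is typically driven against an in-process MiniDFSCluster follows; it is an illustration only, not the code that produced this log, and the two-datanode count is simply taken from the pipeline shown above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;

    public class RestartDataNodesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        try {
          cluster.waitActive();
          // Stop both datanodes, keeping their properties so they can be restarted
          // on the same storage directories (the internal list shrinks, hence index 0 twice).
          DataNodeProperties first = cluster.stopDataNode(0);
          DataNodeProperties second = cluster.stopDataNode(0);
          // Writes issued while the nodes are down later surface as
          // "All datanodes [...] are bad" once the client notices the dead pipeline.
          cluster.restartDataNode(first);
          cluster.restartDataNode(second);
          cluster.waitActive(); // wait for re-registration, the "Data Nodes restarted" point
        } finally {
          cluster.shutdown();
        }
      }
    }

After the restart, the first append on the old pipeline fails and the region server requests the WAL roll that appears in the following entries.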
2024-11-23T19:37:52,207 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 387b213c044a%2C38387%2C1732390652126:(num 1732390666801) roll requested 2024-11-23T19:37:52,208 INFO [regionserver/387b213c044a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C38387%2C1732390652126.1732390672208 2024-11-23T19:37:52,215 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390666801 newFile=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390672208 2024-11-23T19:37:52,215 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:52,215 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:52,215 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:52,215 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:52,215 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:52,216 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390666801 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390672208 2024-11-23T19:37:52,216 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45107,DS-fc4c7074-4d51-44bd-9635-ef051106be7d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:52,216 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45107,DS-fc4c7074-4d51-44bd-9635-ef051106be7d,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T19:37:52,216 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390666801 2024-11-23T19:37:52,216 WARN [IPC Server handler 3 on default port 40593 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390666801 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-23T19:37:52,217 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390666801 after 1ms 2024-11-23T19:37:52,220 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42737:42737),(127.0.0.1/127.0.0.1:39251:39251)] 2024-11-23T19:37:52,220 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390666801 is not closed yet, will try archiving it next time 2024-11-23T19:37:52,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:52,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:53,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:53,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:54,223 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C38387%2C1732390652126.1732390674222 2024-11-23T19:37:54,234 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390672208 newFile=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390674222 2024-11-23T19:37:54,234 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:54,234 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:54,235 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:54,235 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:54,235 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:54,235 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390672208 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390674222 2024-11-23T19:37:54,236 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39251:39251),(127.0.0.1/127.0.0.1:42737:42737)] 2024-11-23T19:37:54,236 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390666801 is not closed yet, will try archiving it next time 2024-11-23T19:37:54,236 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390672208 is not closed yet, will try archiving it next time 2024-11-23T19:37:54,237 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390652746 2024-11-23T19:37:54,237 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390652746 2024-11-23T19:37:54,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38217 is added to blk_1073741838_1019 (size=1264) 2024-11-23T19:37:54,238 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): 
Recovered lease, attempt=0 on file=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390652746 after 0ms 2024-11-23T19:37:54,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32927 is added to blk_1073741838_1019 (size=1264) 2024-11-23T19:37:54,238 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390652746 2024-11-23T19:37:54,238 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390666801 is not closed yet, will try archiving it next time 2024-11-23T19:37:54,249 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732390653745/Put/vlen=218/seqid=0] 2024-11-23T19:37:54,249 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732390663456/Put/vlen=1045/seqid=0] 2024-11-23T19:37:54,249 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390652746 2024-11-23T19:37:54,249 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390666801 2024-11-23T19:37:54,249 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390666801 2024-11-23T19:37:54,250 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390666801 after 1ms 2024-11-23T19:37:54,250 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390666801 2024-11-23T19:37:54,253 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732390666800/Put/vlen=1045/seqid=0] 2024-11-23T19:37:54,253 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732390668812/Put/vlen=1045/seqid=0] 2024-11-23T19:37:54,253 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390666801 2024-11-23T19:37:54,253 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390672208 2024-11-23T19:37:54,253 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390672208 2024-11-23T19:37:54,253 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390672208 after 0ms 2024-11-23T19:37:54,254 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390672208 2024-11-23T19:37:54,256 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732390672206/Put/vlen=1045/seqid=0] 2024-11-23T19:37:54,256 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390674222 2024-11-23T19:37:54,256 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390674222 2024-11-23T19:37:54,257 WARN [IPC Server handler 1 on default port 40593 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390674222 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-23T19:37:54,257 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390674222 after 1ms 2024-11-23T19:37:54,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:54,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:37:54,681 WARN [ResponseProcessor for block BP-109659227-172.17.0.3-1732390649642:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-109659227-172.17.0.3-1732390649642:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:54,681 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_315575667_22 at /127.0.0.1:57006 [Receiving block BP-109659227-172.17.0.3-1732390649642:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:38217:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57006 dst: /127.0.0.1:38217 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:38217 remote=/127.0.0.1:57006]. Total timeout mills is 60000, 59553 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T19:37:54,681 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_315575667_22 at /127.0.0.1:51276 [Receiving block BP-109659227-172.17.0.3-1732390649642:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:32927:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51276 dst: /127.0.0.1:32927 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:37:54,681 WARN [DataStreamer for file /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390674222 block BP-109659227-172.17.0.3-1732390649642:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-109659227-172.17.0.3-1732390649642:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38217,DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9,DISK], DatanodeInfoWithStorage[127.0.0.1:32927,DS-fc4c7074-4d51-44bd-9635-ef051106be7d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38217,DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9,DISK]) is bad. 
2024-11-23T19:37:54,684 WARN [DataStreamer for file /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390674222 block BP-109659227-172.17.0.3-1732390649642:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-109659227-172.17.0.3-1732390649642:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
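The "Unexpected BlockUCState ... UNDER_RECOVERY but not UNDER_CONSTRUCTION" rejection above is the stale DataStreamer's pipeline recovery racing with lease recovery on the same block: the NameNode has already started recovery for blk_1073741839_1021 ("RecoveryId = 1022" in an earlier entry), so the old writer can no longer bump the generation stamp. The retry pattern behind the RecoverLeaseFSUtils entries reduces to the two DistributedFileSystem calls sketched below; this is an illustration of the underlying API, not HBase's implementation, and the timeout and backoff values are arbitrary.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      public static boolean recoverLease(DistributedFileSystem dfs, Path walFile,
          long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        int attempt = 0;
        while (System.currentTimeMillis() < deadline) {
          // Ask the NameNode to start (or re-check) lease recovery; returns true
          // once the file is closed and its lease released, i.e. the
          // "Recovered lease, attempt=N" case in the log above.
          if (dfs.recoverLease(walFile)) {
            return true;
          }
          // Still in progress ("Lease recovery is in progress. RecoveryId = ...");
          // isFileClosed() gives a cheaper check between attempts.
          if (dfs.isFileClosed(walFile)) {
            return true;
          }
          attempt++;
          Thread.sleep(Math.min(4000L, 1000L * attempt)); // simple backoff between attempts
        }
        return false;
      }
    }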
2024-11-23T19:37:54,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38217 is added to blk_1073741839_1022 (size=85) 2024-11-23T19:37:55,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:55,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:56,219 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390666801 after 4003ms 2024-11-23T19:37:56,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:56,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:57,466 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-23T19:37:57,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:37:57,531 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:58,259 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390674222 after 4003ms 2024-11-23T19:37:58,259 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390674222 2024-11-23T19:37:58,267 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390674222 2024-11-23T19:37:58,267 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing f669b96bafe8323b074c81d49adfb5bb 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-23T19:37:58,268 ERROR [FSHLog-0-hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426-prefix:387b213c044a,38387,1732390652126 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-109659227-172.17.0.3-1732390649642:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T19:37:58,268 WARN [FSHLog-0-hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426-prefix:387b213c044a,38387,1732390652126 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-109659227-172.17.0.3-1732390649642:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
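The failure surfaced to appendAndSync above is an org.apache.hadoop.ipc.RemoteException: the "Unexpected BlockUCState" check ran inside the NameNode (FSNamesystem.checkUCBlock) and only its class name and message crossed the wire, which is why the server-side frames appear above the client IPC frames in the same trace. A hedged sketch of how caller-side code can inspect such a failure; the RemoteException here is constructed locally purely for illustration, whereas in the log it arrives through the IPC client:

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;

public class RemoteExceptionSketch {
    // RemoteException carries the server-side class name; unwrapRemoteException()
    // re-creates a typed local exception when that class is on the client classpath.
    static void handle(RemoteException re) {
        System.out.println("server-side class: " + re.getClassName());
        IOException unwrapped = re.unwrapRemoteException();
        System.out.println("handled locally as: " + unwrapped.getClass().getName());
    }

    public static void main(String[] args) {
        handle(new RemoteException("java.io.IOException",
                "Unexpected BlockUCState: block is UNDER_RECOVERY but not UNDER_CONSTRUCTION"));
    }
}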
2024-11-23T19:37:58,269 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 387b213c044a%2C38387%2C1732390652126:(num 1732390674222) roll requested 2024-11-23T19:37:58,269 INFO [regionserver/387b213c044a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C38387%2C1732390652126.1732390678269 2024-11-23T19:37:58,277 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390674222 newFile=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390678269 2024-11-23T19:37:58,277 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:58,277 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:58,277 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:58,277 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:58,278 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:58,278 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390674222 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390678269 2024-11-23T19:37:58,278 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-109659227-172.17.0.3-1732390649642:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:58,278 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-109659227-172.17.0.3-1732390649642:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:58,279 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390674222 2024-11-23T19:37:58,279 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390674222 after 0ms 2024-11-23T19:37:58,279 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42737:42737),(127.0.0.1/127.0.0.1:39251:39251)] 2024-11-23T19:37:58,280 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.1732390674222 to hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/oldWALs/387b213c044a%2C38387%2C1732390652126.1732390674222 2024-11-23T19:37:58,296 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/default/TestLogRolling-testLogRollOnPipelineRestart/f669b96bafe8323b074c81d49adfb5bb/.tmp/info/7226ac8e1f844d31bea7d4395b860112 is 1080, key is row1002/info:/1732390663456/Put/seqid=0 2024-11-23T19:37:58,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32927 is added to blk_1073741841_1024 (size=9270) 2024-11-23T19:37:58,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38217 is added to blk_1073741841_1024 (size=9270) 2024-11-23T19:37:58,302 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/default/TestLogRolling-testLogRollOnPipelineRestart/f669b96bafe8323b074c81d49adfb5bb/.tmp/info/7226ac8e1f844d31bea7d4395b860112 2024-11-23T19:37:58,308 DEBUG 
[Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/default/TestLogRolling-testLogRollOnPipelineRestart/f669b96bafe8323b074c81d49adfb5bb/.tmp/info/7226ac8e1f844d31bea7d4395b860112 as hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/default/TestLogRolling-testLogRollOnPipelineRestart/f669b96bafe8323b074c81d49adfb5bb/info/7226ac8e1f844d31bea7d4395b860112 2024-11-23T19:37:58,313 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/default/TestLogRolling-testLogRollOnPipelineRestart/f669b96bafe8323b074c81d49adfb5bb/info/7226ac8e1f844d31bea7d4395b860112, entries=4, sequenceid=8, filesize=9.1 K 2024-11-23T19:37:58,315 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for f669b96bafe8323b074c81d49adfb5bb in 47ms, sequenceid=8, compaction requested=false 2024-11-23T19:37:58,315 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for f669b96bafe8323b074c81d49adfb5bb: 2024-11-23T19:37:58,315 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-23T19:37:58,315 ERROR [FSHLog-0-hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426-prefix:387b213c044a,38387,1732390652126.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42909,DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:58,315 WARN [FSHLog-0-hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426-prefix:387b213c044a,38387,1732390652126.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42909,DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
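The flush earlier in this sequence follows HBase's temp-then-commit pattern: the HFile is written under a .tmp directory, the "Committing <tmp path> as <final path>" step moves it into the store directory, and only then is "Added ..., entries=4, sequenceid=8, filesize=9.1 K" logged. A minimal sketch of that commit step, under the assumption that it amounts to a same-filesystem rename; the paths, class name, and use of the local filesystem are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRenameSketch {
    // Illustrative commit step: data is written to a temporary path first, then
    // moved into place so readers never observe a partially written file.
    static Path commit(FileSystem fs, Path tmpFile, Path finalFile) throws Exception {
        if (!fs.rename(tmpFile, finalFile)) {      // single metadata operation on HDFS
            throw new java.io.IOException("Failed to commit " + tmpFile + " as " + finalFile);
        }
        return finalFile;
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf); // local FS stands in for HDFS here
        Path tmp = new Path("/tmp/store/.tmp/info/example-hfile");
        Path dst = new Path("/tmp/store/info/example-hfile");
        fs.create(tmp).close();                    // write an (empty) placeholder file
        fs.mkdirs(dst.getParent());
        System.out.println("committed to " + commit(fs, tmp, dst));
    }
}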
2024-11-23T19:37:58,315 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 387b213c044a%2C38387%2C1732390652126.meta:.meta(num 1732390653218) roll requested 2024-11-23T19:37:58,315 INFO [regionserver/387b213c044a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C38387%2C1732390652126.meta.1732390678315.meta 2024-11-23T19:37:58,320 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:58,320 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:58,320 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:58,320 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:58,320 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:58,320 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.meta.1732390653218.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.meta.1732390678315.meta 2024-11-23T19:37:58,321 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42909,DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:37:58,321 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42909,DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-23T19:37:58,321 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.meta.1732390653218.meta 2024-11-23T19:37:58,321 WARN [IPC Server handler 3 on default port 40593 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.meta.1732390653218.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1015 2024-11-23T19:37:58,321 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39251:39251),(127.0.0.1/127.0.0.1:42737:42737)] 2024-11-23T19:37:58,321 DEBUG [regionserver/387b213c044a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.meta.1732390653218.meta is not closed yet, will try archiving it next time 2024-11-23T19:37:58,321 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.meta.1732390653218.meta after 0ms 2024-11-23T19:37:58,338 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740/.tmp/info/f9f4bd436446452282eb18d8f52b35d9 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb./info:regioninfo/1732390653751/Put/seqid=0 2024-11-23T19:37:58,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32927 is added to blk_1073741843_1027 (size=7125) 2024-11-23T19:37:58,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38217 is added to blk_1073741843_1027 (size=7125) 2024-11-23T19:37:58,343 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740/.tmp/info/f9f4bd436446452282eb18d8f52b35d9 2024-11-23T19:37:58,361 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740/.tmp/ns/88ee70f3330d48fcbc360bb5e90ccf48 is 43, key is default/ns:d/1732390653316/Put/seqid=0 2024-11-23T19:37:58,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38217 is added to blk_1073741844_1028 (size=5153) 2024-11-23T19:37:58,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32927 is added to blk_1073741844_1028 (size=5153) 2024-11-23T19:37:58,366 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740/.tmp/ns/88ee70f3330d48fcbc360bb5e90ccf48 2024-11-23T19:37:58,390 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740/.tmp/table/131d7c58991f452aa41a18d05d56609d is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732390653762/Put/seqid=0 2024-11-23T19:37:58,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38217 is added to blk_1073741845_1029 (size=5438) 2024-11-23T19:37:58,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32927 is added to blk_1073741845_1029 (size=5438) 2024-11-23T19:37:58,532 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:37:58,532 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:37:58,797 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740/.tmp/table/131d7c58991f452aa41a18d05d56609d 2024-11-23T19:37:58,808 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740/.tmp/info/f9f4bd436446452282eb18d8f52b35d9 as hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740/info/f9f4bd436446452282eb18d8f52b35d9 2024-11-23T19:37:58,816 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740/info/f9f4bd436446452282eb18d8f52b35d9, entries=10, sequenceid=11, filesize=7.0 K 2024-11-23T19:37:58,817 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740/.tmp/ns/88ee70f3330d48fcbc360bb5e90ccf48 as hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740/ns/88ee70f3330d48fcbc360bb5e90ccf48 2024-11-23T19:37:58,823 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740/ns/88ee70f3330d48fcbc360bb5e90ccf48, entries=2, sequenceid=11, filesize=5.0 K 2024-11-23T19:37:58,824 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740/.tmp/table/131d7c58991f452aa41a18d05d56609d as hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740/table/131d7c58991f452aa41a18d05d56609d 2024-11-23T19:37:58,830 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740/table/131d7c58991f452aa41a18d05d56609d, entries=2, sequenceid=11, filesize=5.3 K 2024-11-23T19:37:58,832 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 516ms, sequenceid=11, compaction requested=false 2024-11-23T19:37:58,832 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-23T19:37:58,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-23T19:37:58,837 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
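Note: the repeated util.RecoverLeaseFSUtils(258) "Failed invocation" warnings above come from the Close-WAL-Writer-0 thread reflectively probing DistributedFileSystem.isFileClosed() for WALs of an earlier mini cluster (the hdfs://localhost:39041 paths); that DFS client has already been shut down, so every probe fails with "Filesystem closed". The lease on the current cluster's meta WAL (hdfs://localhost:40593) is recovered on attempt=1 a few seconds later, further below. A minimal sketch of that recover-then-poll pattern, assuming the class name, retry count and backoff; it is not the RecoverLeaseFSUtils implementation.

```java
// A minimal sketch, assuming names like WalLeaseRecoverySketch/recoverWalLease and the
// retry bounds; the real logic lives in org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.
import java.lang.reflect.Method;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class WalLeaseRecoverySketch {

  /** Ask the NameNode to recover the lease on a WAL and poll until the file is closed. */
  public static boolean recoverWalLease(DistributedFileSystem dfs, Path walPath)
      throws Exception {
    // isFileClosed is looked up reflectively (which is what the stack traces above show)
    // so the same code also runs against older hdfs clients that lack the method.
    Method isFileClosed;
    try {
      isFileClosed = dfs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      isFileClosed = null;
    }
    for (int attempt = 0; attempt < 5; attempt++) {
      if (dfs.recoverLease(walPath)) {      // true once the NameNode has closed the file
        return true;
      }
      if (isFileClosed != null) {
        try {
          if ((Boolean) isFileClosed.invoke(dfs, walPath)) {
            return true;
          }
        } catch (Exception e) {
          // An InvocationTargetException caused by "Filesystem closed" (the WARNs above)
          // means this DFS client instance was already shut down; retrying cannot help.
        }
      }
      Thread.sleep(1000L * (attempt + 1));  // back off between attempt=0, attempt=1, ...
    }
    return false;
  }
}
```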
2024-11-23T19:37:58,837 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:37:58,837 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:37:58,837 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:37:58,837 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
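Note: the call stack above shows where the shutdown originates: AbstractTestLogRolling.tearDown() calls HBaseTestingUtil.shutdownMiniCluster(), which closes the shared async connection before stopping the region server and the master. A minimal JUnit sketch of that teardown, assuming the class and field names; only the calls visible in the stack are taken from the log.

```java
// A minimal sketch, assuming the class and field names; only the calls visible in the
// stack above (tearDown -> HBaseTestingUtil.shutdownMiniCluster) are taken from the log.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class LogRollingTearDownSketch {

  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Closes the shared async connection, stops the region server(s) and the master,
    // then tears down the mini DFS cluster, which is what the entries below record.
    TEST_UTIL.shutdownMiniCluster();
  }
}
```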
2024-11-23T19:37:58,837 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T19:37:58,837 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2001403329, stopped=false 2024-11-23T19:37:58,837 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=387b213c044a,43349,1732390651919 2024-11-23T19:37:58,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T19:37:58,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T19:37:58,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:37:58,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:37:58,968 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T19:37:58,969 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-23T19:37:58,969 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:37:58,969 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:37:58,969 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:37:58,969 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:37:58,970 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '387b213c044a,38387,1732390652126' ***** 2024-11-23T19:37:58,970 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T19:37:58,971 INFO [RS:0;387b213c044a:38387 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T19:37:58,971 INFO [RS:0;387b213c044a:38387 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T19:37:58,971 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T19:37:58,971 INFO [RS:0;387b213c044a:38387 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T19:37:58,971 INFO [RS:0;387b213c044a:38387 {}] regionserver.HRegionServer(3091): Received CLOSE for f669b96bafe8323b074c81d49adfb5bb 2024-11-23T19:37:58,972 INFO [RS:0;387b213c044a:38387 {}] regionserver.HRegionServer(959): stopping server 387b213c044a,38387,1732390652126 2024-11-23T19:37:58,972 INFO [RS:0;387b213c044a:38387 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T19:37:58,972 INFO [RS:0;387b213c044a:38387 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;387b213c044a:38387. 
2024-11-23T19:37:58,972 DEBUG [RS:0;387b213c044a:38387 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:37:58,972 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f669b96bafe8323b074c81d49adfb5bb, disabling compactions & flushes 2024-11-23T19:37:58,972 DEBUG [RS:0;387b213c044a:38387 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:37:58,972 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb. 2024-11-23T19:37:58,972 INFO [RS:0;387b213c044a:38387 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T19:37:58,972 INFO [RS:0;387b213c044a:38387 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T19:37:58,972 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb. 2024-11-23T19:37:58,973 INFO [RS:0;387b213c044a:38387 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T19:37:58,973 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb. after waiting 0 ms 2024-11-23T19:37:58,973 INFO [RS:0;387b213c044a:38387 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-23T19:37:58,973 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb. 
2024-11-23T19:37:58,974 INFO [RS:0;387b213c044a:38387 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-23T19:37:58,974 DEBUG [RS:0;387b213c044a:38387 {}] regionserver.HRegionServer(1325): Online Regions={f669b96bafe8323b074c81d49adfb5bb=TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb., 1588230740=hbase:meta,,1.1588230740} 2024-11-23T19:37:58,974 DEBUG [RS:0;387b213c044a:38387 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, f669b96bafe8323b074c81d49adfb5bb 2024-11-23T19:37:58,974 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T19:37:58,974 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T19:37:58,974 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T19:37:58,975 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T19:37:58,975 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T19:37:58,979 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/default/TestLogRolling-testLogRollOnPipelineRestart/f669b96bafe8323b074c81d49adfb5bb/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-23T19:37:58,979 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-23T19:37:58,979 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb. 
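Note: the "Wrote file=.../recovered.edits/11.seqid, newMaxSeqId=11" lines above are clean-close markers: on close each region records its highest sequence id under recovered.edits so a later reopen knows where edit replay may start. A minimal sketch of just the path convention those lines show; the method and parameter names are assumptions, not HBase API.

```java
// A minimal sketch of the path convention only; method and parameter names are
// assumptions, not HBase API.
import org.apache.hadoop.fs.Path;

public final class MaxSeqIdMarkerSketch {

  /** e.g. regionDir ".../f669b96bafe8323b074c81d49adfb5bb", newMaxSeqId 11 -> ".../recovered.edits/11.seqid" */
  public static Path maxSeqIdMarker(Path regionDir, long newMaxSeqId) {
    return new Path(new Path(regionDir, "recovered.edits"), newMaxSeqId + ".seqid");
  }
}
```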
2024-11-23T19:37:58,980 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T19:37:58,980 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f669b96bafe8323b074c81d49adfb5bb: Waiting for close lock at 1732390678972Running coprocessor pre-close hooks at 1732390678972Disabling compacts and flushes for region at 1732390678972Disabling writes for close at 1732390678973 (+1 ms)Writing region close event to WAL at 1732390678974 (+1 ms)Running coprocessor post-close hooks at 1732390678979 (+5 ms)Closed at 1732390678979 2024-11-23T19:37:58,980 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T19:37:58,980 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732390678974Running coprocessor pre-close hooks at 1732390678974Disabling compacts and flushes for region at 1732390678974Disabling writes for close at 1732390678975 (+1 ms)Writing region close event to WAL at 1732390678976 (+1 ms)Running coprocessor post-close hooks at 1732390678979 (+3 ms)Closed at 1732390678980 (+1 ms) 2024-11-23T19:37:58,980 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732390653369.f669b96bafe8323b074c81d49adfb5bb. 2024-11-23T19:37:58,980 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T19:37:59,175 INFO [RS:0;387b213c044a:38387 {}] regionserver.HRegionServer(976): stopping server 387b213c044a,38387,1732390652126; all regions closed. 2024-11-23T19:37:59,176 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:59,177 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:59,177 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:59,177 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:59,178 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:37:59,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32927 is added to blk_1073741842_1025 (size=825) 2024-11-23T19:37:59,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38217 is added to blk_1073741842_1025 (size=825) 2024-11-23T19:37:59,533 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:59,533 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:37:59,610 INFO [regionserver/387b213c044a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-23T19:37:59,610 INFO [regionserver/387b213c044a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-23T19:38:00,535 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:00,535 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:00,612 INFO [regionserver/387b213c044a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T19:38:00,679 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-23T19:38:01,537 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:01,537 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:01,898 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T19:38:02,323 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.meta.1732390653218.meta after 4002ms 2024-11-23T19:38:02,324 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/WALs/387b213c044a,38387,1732390652126/387b213c044a%2C38387%2C1732390652126.meta.1732390653218.meta to hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/oldWALs/387b213c044a%2C38387%2C1732390652126.meta.1732390653218.meta 2024-11-23T19:38:02,332 DEBUG [RS:0;387b213c044a:38387 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/oldWALs 2024-11-23T19:38:02,332 INFO [RS:0;387b213c044a:38387 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 387b213c044a%2C38387%2C1732390652126.meta:.meta(num 1732390678315) 2024-11-23T19:38:02,333 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:38:02,334 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:38:02,334 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:38:02,334 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:38:02,335 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:38:02,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38217 is added to blk_1073741840_1023 (size=1162) 2024-11-23T19:38:02,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32927 is added to blk_1073741840_1023 (size=1162) 2024-11-23T19:38:02,341 DEBUG [RS:0;387b213c044a:38387 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/oldWALs 2024-11-23T19:38:02,341 INFO [RS:0;387b213c044a:38387 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 387b213c044a%2C38387%2C1732390652126:(num 1732390678269) 2024-11-23T19:38:02,341 DEBUG 
[RS:0;387b213c044a:38387 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:38:02,341 INFO [RS:0;387b213c044a:38387 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T19:38:02,341 INFO [RS:0;387b213c044a:38387 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T19:38:02,341 INFO [RS:0;387b213c044a:38387 {}] hbase.ChoreService(370): Chore service for: regionserver/387b213c044a:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T19:38:02,341 INFO [RS:0;387b213c044a:38387 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T19:38:02,342 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T19:38:02,342 INFO [RS:0;387b213c044a:38387 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38387 2024-11-23T19:38:02,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T19:38:02,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/387b213c044a,38387,1732390652126 2024-11-23T19:38:02,388 INFO [RS:0;387b213c044a:38387 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T19:38:02,399 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [387b213c044a,38387,1732390652126] 2024-11-23T19:38:02,409 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/387b213c044a,38387,1732390652126 already deleted, retry=false 2024-11-23T19:38:02,409 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 387b213c044a,38387,1732390652126 expired; onlineServers=0 2024-11-23T19:38:02,409 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '387b213c044a,43349,1732390651919' ***** 2024-11-23T19:38:02,409 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T19:38:02,409 INFO [M:0;387b213c044a:43349 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T19:38:02,409 INFO [M:0;387b213c044a:43349 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T19:38:02,409 DEBUG [M:0;387b213c044a:43349 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T19:38:02,410 DEBUG [M:0;387b213c044a:43349 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T19:38:02,410 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-23T19:38:02,410 DEBUG [master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390652477 {}] cleaner.HFileCleaner(306): Exit Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390652477,5,FailOnTimeoutGroup] 2024-11-23T19:38:02,410 DEBUG [master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390652477 {}] cleaner.HFileCleaner(306): Exit Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390652477,5,FailOnTimeoutGroup] 2024-11-23T19:38:02,410 INFO [M:0;387b213c044a:43349 {}] hbase.ChoreService(370): Chore service for: master/387b213c044a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-23T19:38:02,411 INFO [M:0;387b213c044a:43349 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T19:38:02,411 DEBUG [M:0;387b213c044a:43349 {}] master.HMaster(1795): Stopping service threads 2024-11-23T19:38:02,411 INFO [M:0;387b213c044a:43349 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T19:38:02,411 INFO [M:0;387b213c044a:43349 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T19:38:02,411 INFO [M:0;387b213c044a:43349 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T19:38:02,412 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-23T19:38:02,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T19:38:02,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:38:02,420 DEBUG [M:0;387b213c044a:43349 {}] zookeeper.ZKUtil(347): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T19:38:02,420 WARN [M:0;387b213c044a:43349 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T19:38:02,421 INFO [M:0;387b213c044a:43349 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/.lastflushedseqids 2024-11-23T19:38:02,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38217 is added to blk_1073741846_1030 (size=111) 2024-11-23T19:38:02,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32927 is added to blk_1073741846_1030 (size=111) 2024-11-23T19:38:02,432 INFO [M:0;387b213c044a:43349 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-23T19:38:02,433 INFO [M:0;387b213c044a:43349 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T19:38:02,433 DEBUG [M:0;387b213c044a:43349 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T19:38:02,433 INFO [M:0;387b213c044a:43349 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:38:02,433 DEBUG [M:0;387b213c044a:43349 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:38:02,433 DEBUG [M:0;387b213c044a:43349 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T19:38:02,433 DEBUG [M:0;387b213c044a:43349 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:38:02,433 INFO [M:0;387b213c044a:43349 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-11-23T19:38:02,434 ERROR [FSHLog-0-hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData-prefix:387b213c044a,43349,1732390651919 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42909,DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:38:02,434 WARN [FSHLog-0-hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData-prefix:387b213c044a,43349,1732390651919 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42909,DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
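Note: the ERROR/WARN pair above ("All datanodes ... are bad. Aborting...") is an append/sync on the master-store WAL failing because every datanode left in its write pipeline is reported bad; the entries below show the roller reacting by requesting a roll, opening a new writer on a fresh pipeline, and leaving the old file to lease recovery and later archiving. A hedged sketch of that reaction using the public WAL interface; "wal" and the method name are assumptions, and in the server the roll is driven by AbstractWALRoller rather than code like this.

```java
// A hedged sketch using the public WAL interface; "wal" and the method name are
// assumptions, and in the server this reaction is driven by AbstractWALRoller.
import java.io.IOException;

import org.apache.hadoop.hbase.wal.WAL;

public final class RollOnPipelineFailureSketch {

  static void rollOnPipelineFailure(WAL wal) {
    try {
      wal.sync();               // fails here with "All datanodes ... are bad. Aborting..."
    } catch (IOException pipelineDead) {
      try {
        wal.rollWriter(true);   // force a roll: new writer, new block, fresh datanode pipeline
      } catch (IOException rollFailed) {
        // If even the forced roll fails the server aborts; the entries below show the
        // roll succeeding and the old file being handed to lease recovery instead.
      }
    }
  }
}
```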
2024-11-23T19:38:02,434 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 387b213c044a%2C43349%2C1732390651919:(num 1732390652264) roll requested 2024-11-23T19:38:02,434 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C43349%2C1732390651919.1732390682434 2024-11-23T19:38:02,441 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:38:02,441 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:38:02,441 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:38:02,441 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:38:02,442 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:38:02,442 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/WALs/387b213c044a,43349,1732390651919/387b213c044a%2C43349%2C1732390651919.1732390652264 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/WALs/387b213c044a,43349,1732390651919/387b213c044a%2C43349%2C1732390651919.1732390682434 2024-11-23T19:38:02,442 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42909,DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:38:02,442 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42909,DS-969056f4-eaf4-4129-9167-3cdd5c1e2fb9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-23T19:38:02,443 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/WALs/387b213c044a,43349,1732390651919/387b213c044a%2C43349%2C1732390651919.1732390652264 2024-11-23T19:38:02,443 WARN [IPC Server handler 4 on default port 40593 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/WALs/387b213c044a,43349,1732390651919/387b213c044a%2C43349%2C1732390651919.1732390652264 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-23T19:38:02,443 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/WALs/387b213c044a,43349,1732390651919/387b213c044a%2C43349%2C1732390651919.1732390652264 after 0ms 2024-11-23T19:38:02,443 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42737:42737),(127.0.0.1/127.0.0.1:39251:39251)] 2024-11-23T19:38:02,444 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/WALs/387b213c044a,43349,1732390651919/387b213c044a%2C43349%2C1732390651919.1732390652264 is not closed yet, will try archiving it next time 2024-11-23T19:38:02,459 DEBUG [M:0;387b213c044a:43349 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c42d4d18ee684a7e8b73949b76ea1258 is 82, key is hbase:meta,,1/info:regioninfo/1732390653245/Put/seqid=0 2024-11-23T19:38:02,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38217 is added to blk_1073741848_1033 (size=5672) 2024-11-23T19:38:02,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32927 is added to blk_1073741848_1033 (size=5672) 2024-11-23T19:38:02,464 INFO [M:0;387b213c044a:43349 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c42d4d18ee684a7e8b73949b76ea1258 2024-11-23T19:38:02,485 DEBUG [M:0;387b213c044a:43349 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/458938a5477446bea31288fdf2875f5f is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732390653766/Put/seqid=0 2024-11-23T19:38:02,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38217 is added to blk_1073741849_1034 (size=6117) 2024-11-23T19:38:02,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32927 is added to blk_1073741849_1034 (size=6117) 2024-11-23T19:38:02,490 INFO [M:0;387b213c044a:43349 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/458938a5477446bea31288fdf2875f5f 2024-11-23T19:38:02,499 INFO [RS:0;387b213c044a:38387 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T19:38:02,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:38:02,499 INFO [RS:0;387b213c044a:38387 {}] regionserver.HRegionServer(1031): Exiting; 
stopping=387b213c044a,38387,1732390652126; zookeeper connection closed. 2024-11-23T19:38:02,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38387-0x101693383a80001, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:38:02,499 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@52875cd5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@52875cd5 2024-11-23T19:38:02,499 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-23T19:38:02,510 DEBUG [M:0;387b213c044a:43349 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5abfe0c4e7784132a4beb8b5dad98291 is 69, key is 387b213c044a,38387,1732390652126/rs:state/1732390652584/Put/seqid=0 2024-11-23T19:38:02,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38217 is added to blk_1073741850_1035 (size=5156) 2024-11-23T19:38:02,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32927 is added to blk_1073741850_1035 (size=5156) 2024-11-23T19:38:02,515 INFO [M:0;387b213c044a:43349 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5abfe0c4e7784132a4beb8b5dad98291 2024-11-23T19:38:02,533 DEBUG [M:0;387b213c044a:43349 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0019d34bf58447b5ae8f2750c6bca0b1 is 52, key is load_balancer_on/state:d/1732390653364/Put/seqid=0 2024-11-23T19:38:02,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38217 is added to blk_1073741851_1036 (size=5056) 2024-11-23T19:38:02,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32927 is added to blk_1073741851_1036 (size=5056) 2024-11-23T19:38:02,538 INFO [M:0;387b213c044a:43349 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0019d34bf58447b5ae8f2750c6bca0b1 2024-11-23T19:38:02,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:02,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:02,543 DEBUG [M:0;387b213c044a:43349 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c42d4d18ee684a7e8b73949b76ea1258 as hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c42d4d18ee684a7e8b73949b76ea1258 2024-11-23T19:38:02,547 INFO [M:0;387b213c044a:43349 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c42d4d18ee684a7e8b73949b76ea1258, entries=8, sequenceid=56, filesize=5.5 K 2024-11-23T19:38:02,548 DEBUG [M:0;387b213c044a:43349 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/458938a5477446bea31288fdf2875f5f as hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/458938a5477446bea31288fdf2875f5f 2024-11-23T19:38:02,554 INFO [M:0;387b213c044a:43349 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/458938a5477446bea31288fdf2875f5f, entries=6, sequenceid=56, filesize=6.0 K 2024-11-23T19:38:02,555 DEBUG [M:0;387b213c044a:43349 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5abfe0c4e7784132a4beb8b5dad98291 as hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5abfe0c4e7784132a4beb8b5dad98291 2024-11-23T19:38:02,560 INFO [M:0;387b213c044a:43349 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5abfe0c4e7784132a4beb8b5dad98291, entries=1, sequenceid=56, filesize=5.0 K 2024-11-23T19:38:02,562 DEBUG [M:0;387b213c044a:43349 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0019d34bf58447b5ae8f2750c6bca0b1 as hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0019d34bf58447b5ae8f2750c6bca0b1 2024-11-23T19:38:02,567 INFO [M:0;387b213c044a:43349 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0019d34bf58447b5ae8f2750c6bca0b1, entries=1, sequenceid=56, filesize=4.9 K 2024-11-23T19:38:02,568 INFO [M:0;387b213c044a:43349 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=56, compaction requested=false 2024-11-23T19:38:02,569 INFO [M:0;387b213c044a:43349 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:38:02,570 DEBUG [M:0;387b213c044a:43349 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732390682433Disabling compacts and flushes for region at 1732390682433Disabling writes for close at 1732390682433Obtaining lock to block concurrent updates at 1732390682433Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732390682433Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1732390682434 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732390682444 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732390682444Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732390682458 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732390682458Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732390682470 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732390682484 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732390682484Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732390682495 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732390682509 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732390682509Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732390682519 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732390682532 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732390682532Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e2a9ffa: reopening flushed file at 1732390682542 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17c14d68: reopening flushed file at 1732390682548 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57cbb737: reopening flushed file at 1732390682554 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1bd1b12e: reopening flushed file at 1732390682561 (+7 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=56, compaction requested=false at 1732390682568 (+7 ms)Writing region close event to WAL at 1732390682569 (+1 ms)Closed at 1732390682569 2024-11-23T19:38:02,570 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:38:02,570 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:38:02,570 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:38:02,570 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 
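The flush records above follow HBase's write-then-commit pattern: each column family's data is first written to an HFile under the store's .tmp directory and only then renamed into the live store directory (the "Committing .tmp/... as .../info/..." lines). Below is a minimal sketch of that pattern against the public Hadoop FileSystem API; the class name, paths and configuration are hypothetical placeholders for illustration, and this is not the actual HRegionFileSystem code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import java.io.IOException;

    public class TmpCommitSketch {
      // Publish a flushed file by renaming it out of .tmp into the store
      // directory, mirroring the "Committing ... as ..." records above.
      static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
        Path dst = new Path(storeDir, tmpFile.getName());
        if (!fs.rename(tmpFile, dst)) {
          throw new IOException("Failed to commit " + tmpFile + " to " + dst);
        }
        return dst;
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();            // assumes fs.defaultFS points at the test HDFS
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/example/store/.tmp/flushfile"); // hypothetical placeholder path
        Path store = new Path("/example/store/info");         // hypothetical store directory
        System.out.println("Committed to " + commit(fs, tmp, store));
      }
    }

The rename is what keeps readers from ever observing a partially written HFile: until the commit only the .tmp copy exists, and afterwards the file appears in the store directory fully formed, which is why the "Added ..., entries=..., filesize=..." records follow immediately.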
2024-11-23T19:38:02,570 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:38:02,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38217 is added to blk_1073741847_1031 (size=757) 2024-11-23T19:38:02,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32927 is added to blk_1073741847_1031 (size=757) 2024-11-23T19:38:03,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:03,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:03,548 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6f24b59f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38217, datanodeUuid=7b35383a-9ed3-4eee-81fb-2342f84cf290, infoPort=39251, infoSecurePort=0, ipcPort=38939, storageInfo=lv=-57;cid=testClusterID;nsid=1674786499;c=1732390649642):Failed to transfer BP-109659227-172.17.0.3-1732390649642:blk_1073741830_1032 to 127.0.0.1:32927 got java.net.SocketException: Original Exception : java.io.IOException: Connection reset by peer at sun.nio.ch.FileChannelImpl.transferTo0(Native Method) ~[?:?] at sun.nio.ch.FileChannelImpl.transferToDirectlyInternal(FileChannelImpl.java:508) ~[?:?] at sun.nio.ch.FileChannelImpl.transferToDirectly(FileChannelImpl.java:573) ~[?:?] at sun.nio.ch.FileChannelImpl.transferTo(FileChannelImpl.java:695) ~[?:?] at org.apache.hadoop.net.SocketOutputStream.transferToFully(SocketOutputStream.java:222) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.FileIoProvider.transferToSocketFully(FileIoProvider.java:278) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockSender.sendPacket(BlockSender.java:619) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockSender.doSendBlock(BlockSender.java:819) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockSender.sendBlock(BlockSender.java:766) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3102) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Connection reset by peer ... 13 more 2024-11-23T19:38:03,679 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-23T19:38:03,981 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,002 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,002 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,002 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,002 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,003 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,003 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,006 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,006 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,006 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,007 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,011 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,011 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,514 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T19:38:04,517 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,518 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,538 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,538 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,538 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,539 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,539 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,539 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:04,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:04,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,544 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:04,549 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T19:38:04,549 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T19:38:04,549 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-23T19:38:04,549 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-23T19:38:05,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:05,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:06,445 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/WALs/387b213c044a,43349,1732390651919/387b213c044a%2C43349%2C1732390651919.1732390652264 after 4002ms 2024-11-23T19:38:06,446 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/WALs/387b213c044a,43349,1732390651919/387b213c044a%2C43349%2C1732390651919.1732390652264 to hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/oldWALs/387b213c044a%2C43349%2C1732390651919.1732390652264 2024-11-23T19:38:06,453 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/MasterData/oldWALs/387b213c044a%2C43349%2C1732390651919.1732390652264 to hdfs://localhost:40593/user/jenkins/test-data/af9857bd-b3b1-0cb0-5bc8-d848c3b39426/oldWALs/387b213c044a%2C43349%2C1732390651919.1732390652264$masterlocalwal$ 2024-11-23T19:38:06,453 INFO [M:0;387b213c044a:43349 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-23T19:38:06,453 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T19:38:06,453 INFO [M:0;387b213c044a:43349 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43349 2024-11-23T19:38:06,454 INFO [M:0;387b213c044a:43349 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T19:38:06,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:06,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:06,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:38:06,647 INFO [M:0;387b213c044a:43349 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T19:38:06,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43349-0x101693383a80000, quorum=127.0.0.1:64930, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:38:06,652 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2dd41fe4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:38:06,653 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3420abff{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:38:06,653 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:38:06,653 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bc7279c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:38:06,654 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@51065df5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/hadoop.log.dir/,STOPPED} 2024-11-23T19:38:06,656 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
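The lease recovery sequence reflected in these records (attempt=0 fails immediately, attempt=1 succeeds after roughly 4 seconds, and only then is the old WAL archived) is a poll-and-retry loop against the NameNode. A rough sketch of that loop using the public DistributedFileSystem.recoverLease()/isFileClosed() calls follows; the retry count and sleep interval are illustrative assumptions, and this is not HBase's actual RecoverLeaseFSUtils implementation.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import java.io.IOException;

    public class LeaseRecoverySketch {
      // Keep asking the NameNode to recover the lease on the old WAL until it
      // reports the file closed, as in the attempt=0 / attempt=1 records above.
      static boolean recoverLease(DistributedFileSystem dfs, Path oldWal, int maxAttempts)
          throws IOException, InterruptedException {
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
          // recoverLease() returns true once the file is closed; isFileClosed()
          // double-checks in case recovery completed between attempts.
          if (dfs.recoverLease(oldWal) || dfs.isFileClosed(oldWal)) {
            return true;
          }
          Thread.sleep(4000L);   // illustrative back-off between attempts
        }
        return false;
      }
    }

Only after the lease is recovered can the roller safely move the old WAL, which is why the "Archiving ..." and "Moved ..." records appear right after "Recovered lease, attempt=1".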
2024-11-23T19:38:06,656 WARN [BP-109659227-172.17.0.3-1732390649642 heartbeating to localhost/127.0.0.1:40593 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:38:06,656 WARN [BP-109659227-172.17.0.3-1732390649642 heartbeating to localhost/127.0.0.1:40593 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-109659227-172.17.0.3-1732390649642 (Datanode Uuid ec686ada-82a9-4b0c-b575-124e8c816694) service to localhost/127.0.0.1:40593 2024-11-23T19:38:06,656 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:38:06,657 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091/data/data3/current/BP-109659227-172.17.0.3-1732390649642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:38:06,658 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091/data/data4/current/BP-109659227-172.17.0.3-1732390649642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:38:06,658 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:38:06,660 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@38ca15c7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:38:06,660 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6573e60c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:38:06,660 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:38:06,660 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@518d8d55{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:38:06,661 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@730e0fff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/hadoop.log.dir/,STOPPED} 2024-11-23T19:38:06,661 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
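The DataNode, NameNode and MiniZK shutdown records around this point are the tail end of the test's minicluster teardown. A hedged sketch of that lifecycle is below, assuming HBaseTestingUtil exposes the same startMiniCluster()/shutdownMiniCluster() API as the older HBaseTestingUtility; the test body is elided and the no-argument start call is a simplification of how tests like TestLogRolling configure masters, regionservers and datanodes.

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterLifecycleSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster();            // brings up MiniZK, MiniDFS, master and regionserver
        try {
          // ... exercise the cluster, e.g. force WAL rolls and restart the HDFS pipeline ...
        } finally {
          util.shutdownMiniCluster();       // stops HBase, then DataNodes, NameNode and MiniZK,
                                            // ending with the "Minicluster is down" record below
        }
      }
    }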
2024-11-23T19:38:06,661 WARN [BP-109659227-172.17.0.3-1732390649642 heartbeating to localhost/127.0.0.1:40593 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:38:06,661 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:38:06,661 WARN [BP-109659227-172.17.0.3-1732390649642 heartbeating to localhost/127.0.0.1:40593 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-109659227-172.17.0.3-1732390649642 (Datanode Uuid 7b35383a-9ed3-4eee-81fb-2342f84cf290) service to localhost/127.0.0.1:40593 2024-11-23T19:38:06,662 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091/data/data1/current/BP-109659227-172.17.0.3-1732390649642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:38:06,662 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/cluster_a1fde3de-d9d3-794a-8988-cc9e205c8091/data/data2/current/BP-109659227-172.17.0.3-1732390649642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:38:06,663 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:38:06,667 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1974987b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T19:38:06,668 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10c583a0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:38:06,668 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:38:06,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e13d66c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:38:06,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@119a3311{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/hadoop.log.dir/,STOPPED} 2024-11-23T19:38:06,674 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-23T19:38:06,692 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-23T19:38:06,700 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:40593 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40593 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-1 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40593 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40593 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40593 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40593 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40593 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:40593 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=294 (was 532), ProcessCount=11 (was 11), AvailableMemoryMB=2726 (was 2873) 2024-11-23T19:38:06,706 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=294, ProcessCount=11, AvailableMemoryMB=2726 2024-11-23T19:38:06,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-23T19:38:06,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/hadoop.log.dir so I do NOT create it in target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc 2024-11-23T19:38:06,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc1532b5-4458-4e7b-45c4-cbbb8329d15c/hadoop.tmp.dir so I do NOT create it in target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc 2024-11-23T19:38:06,707 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/cluster_29ab16ff-714f-caed-9019-95fea71ec849, deleteOnExit=true 2024-11-23T19:38:06,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-23T19:38:06,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/test.cache.data in system properties and HBase conf 2024-11-23T19:38:06,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T19:38:06,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/hadoop.log.dir in system properties and HBase conf 2024-11-23T19:38:06,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T19:38:06,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T19:38:06,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-23T19:38:06,707 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file 
system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-23T19:38:06,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T19:38:06,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T19:38:06,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T19:38:06,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T19:38:06,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T19:38:06,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T19:38:06,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T19:38:06,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T19:38:06,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T19:38:06,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/nfs.dump.dir in system properties and HBase conf 2024-11-23T19:38:06,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/java.io.tmpdir in system properties and HBase conf 2024-11-23T19:38:06,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T19:38:06,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T19:38:06,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T19:38:06,722 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T19:38:07,108 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:38:07,112 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:38:07,115 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:38:07,115 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:38:07,115 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T19:38:07,116 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:38:07,117 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a4b134d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:38:07,117 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6dc3ea71{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:38:07,210 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2719a663{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/java.io.tmpdir/jetty-localhost-46589-hadoop-hdfs-3_4_1-tests_jar-_-any-4380753551887450409/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T19:38:07,211 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1af676f5{HTTP/1.1, (http/1.1)}{localhost:46589} 2024-11-23T19:38:07,211 INFO [Time-limited test {}] server.Server(415): Started @199742ms 2024-11-23T19:38:07,222 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T19:38:07,475 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:38:07,477 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:38:07,478 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:38:07,478 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:38:07,478 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T19:38:07,478 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39f00f78{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:38:07,478 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74b5ebca{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:38:07,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:07,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:07,572 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5127cbf0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/java.io.tmpdir/jetty-localhost-43491-hadoop-hdfs-3_4_1-tests_jar-_-any-4784007267482632957/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:38:07,572 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@9b25e94{HTTP/1.1, (http/1.1)}{localhost:43491} 2024-11-23T19:38:07,572 INFO [Time-limited test {}] server.Server(415): Started @200103ms 2024-11-23T19:38:07,573 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:38:07,597 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:38:07,601 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:38:07,602 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:38:07,602 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:38:07,602 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T19:38:07,602 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ec76923{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:38:07,603 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f411ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:38:07,696 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c6abbb8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/java.io.tmpdir/jetty-localhost-46109-hadoop-hdfs-3_4_1-tests_jar-_-any-9646997898575869582/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:38:07,697 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@555a4a92{HTTP/1.1, (http/1.1)}{localhost:46109} 2024-11-23T19:38:07,697 INFO [Time-limited test {}] server.Server(415): Started @200228ms 2024-11-23T19:38:07,698 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:38:08,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:08,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:08,699 WARN [Thread-1660 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/cluster_29ab16ff-714f-caed-9019-95fea71ec849/data/data1/current/BP-834335505-172.17.0.3-1732390686733/current, will proceed with Du for space computation calculation, 2024-11-23T19:38:08,700 WARN [Thread-1661 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/cluster_29ab16ff-714f-caed-9019-95fea71ec849/data/data2/current/BP-834335505-172.17.0.3-1732390686733/current, will proceed with Du for space computation calculation, 2024-11-23T19:38:08,721 WARN [Thread-1624 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T19:38:08,723 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6761852304db7729 with lease ID 0xdbcde5d0f66ba269: Processing first storage report for DS-ef93074b-7664-42ef-a0ad-89d0541161a4 from datanode DatanodeRegistration(127.0.0.1:39749, datanodeUuid=b7ef4820-dc05-4b96-9952-ac870b07d58f, infoPort=40895, infoSecurePort=0, ipcPort=42979, storageInfo=lv=-57;cid=testClusterID;nsid=182826951;c=1732390686733) 2024-11-23T19:38:08,723 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6761852304db7729 with lease ID 0xdbcde5d0f66ba269: from storage DS-ef93074b-7664-42ef-a0ad-89d0541161a4 node DatanodeRegistration(127.0.0.1:39749, datanodeUuid=b7ef4820-dc05-4b96-9952-ac870b07d58f, infoPort=40895, infoSecurePort=0, ipcPort=42979, storageInfo=lv=-57;cid=testClusterID;nsid=182826951;c=1732390686733), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:38:08,723 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6761852304db7729 with lease ID 0xdbcde5d0f66ba269: Processing first storage report for DS-efa0d46d-9699-4cdc-9bbf-468d5f1e19d0 from datanode DatanodeRegistration(127.0.0.1:39749, datanodeUuid=b7ef4820-dc05-4b96-9952-ac870b07d58f, infoPort=40895, infoSecurePort=0, ipcPort=42979, storageInfo=lv=-57;cid=testClusterID;nsid=182826951;c=1732390686733) 2024-11-23T19:38:08,723 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6761852304db7729 with lease ID 0xdbcde5d0f66ba269: from storage DS-efa0d46d-9699-4cdc-9bbf-468d5f1e19d0 node DatanodeRegistration(127.0.0.1:39749, datanodeUuid=b7ef4820-dc05-4b96-9952-ac870b07d58f, infoPort=40895, infoSecurePort=0, ipcPort=42979, storageInfo=lv=-57;cid=testClusterID;nsid=182826951;c=1732390686733), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:38:08,844 WARN [Thread-1671 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/cluster_29ab16ff-714f-caed-9019-95fea71ec849/data/data3/current/BP-834335505-172.17.0.3-1732390686733/current, will proceed with Du for space computation calculation, 2024-11-23T19:38:08,844 WARN [Thread-1672 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/cluster_29ab16ff-714f-caed-9019-95fea71ec849/data/data4/current/BP-834335505-172.17.0.3-1732390686733/current, will proceed with Du for space computation calculation, 2024-11-23T19:38:08,863 WARN [Thread-1647 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T19:38:08,865 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xabfed700aed5a7bd with lease ID 0xdbcde5d0f66ba26a: Processing first storage report for DS-2bd481c2-8a3f-4bed-8aa3-3f045bb2af5a from datanode DatanodeRegistration(127.0.0.1:39783, datanodeUuid=347fc59d-5d84-4f5f-a89d-096d3191ace5, infoPort=44299, infoSecurePort=0, ipcPort=45965, storageInfo=lv=-57;cid=testClusterID;nsid=182826951;c=1732390686733) 2024-11-23T19:38:08,865 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xabfed700aed5a7bd with lease ID 0xdbcde5d0f66ba26a: from storage DS-2bd481c2-8a3f-4bed-8aa3-3f045bb2af5a node DatanodeRegistration(127.0.0.1:39783, datanodeUuid=347fc59d-5d84-4f5f-a89d-096d3191ace5, infoPort=44299, infoSecurePort=0, ipcPort=45965, storageInfo=lv=-57;cid=testClusterID;nsid=182826951;c=1732390686733), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:38:08,865 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xabfed700aed5a7bd with lease ID 0xdbcde5d0f66ba26a: Processing first storage report for DS-ba0e82c2-b498-4a9e-bbe9-a126ba352958 from datanode DatanodeRegistration(127.0.0.1:39783, datanodeUuid=347fc59d-5d84-4f5f-a89d-096d3191ace5, infoPort=44299, infoSecurePort=0, ipcPort=45965, storageInfo=lv=-57;cid=testClusterID;nsid=182826951;c=1732390686733) 2024-11-23T19:38:08,866 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xabfed700aed5a7bd with lease ID 0xdbcde5d0f66ba26a: from storage DS-ba0e82c2-b498-4a9e-bbe9-a126ba352958 node DatanodeRegistration(127.0.0.1:39783, datanodeUuid=347fc59d-5d84-4f5f-a89d-096d3191ace5, infoPort=44299, infoSecurePort=0, ipcPort=45965, storageInfo=lv=-57;cid=testClusterID;nsid=182826951;c=1732390686733), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:38:08,933 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc 2024-11-23T19:38:08,959 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/cluster_29ab16ff-714f-caed-9019-95fea71ec849/zookeeper_0, clientPort=50342, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/cluster_29ab16ff-714f-caed-9019-95fea71ec849/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/cluster_29ab16ff-714f-caed-9019-95fea71ec849/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T19:38:08,960 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50342 2024-11-23T19:38:08,960 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:38:08,962 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:38:08,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741825_1001 (size=7) 2024-11-23T19:38:08,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741825_1001 (size=7) 2024-11-23T19:38:08,973 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868 with version=8 2024-11-23T19:38:08,973 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/hbase-staging 2024-11-23T19:38:08,976 INFO [Time-limited test {}] client.ConnectionUtils(128): master/387b213c044a:0 server-side Connection retries=45 2024-11-23T19:38:08,976 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:38:08,976 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T19:38:08,976 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T19:38:08,976 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:38:08,976 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T19:38:08,976 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-23T19:38:08,976 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T19:38:08,977 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40023 2024-11-23T19:38:08,979 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40023 connecting to ZooKeeper ensemble=127.0.0.1:50342 2024-11-23T19:38:09,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:400230x0, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-23T19:38:09,074 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40023-0x101693414690000 connected 2024-11-23T19:38:09,273 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:38:09,277 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:38:09,280 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:38:09,280 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868, hbase.cluster.distributed=false 2024-11-23T19:38:09,283 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T19:38:09,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40023 2024-11-23T19:38:09,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40023 2024-11-23T19:38:09,285 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40023 2024-11-23T19:38:09,286 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40023 2024-11-23T19:38:09,286 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40023 2024-11-23T19:38:09,303 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/387b213c044a:0 server-side Connection retries=45 2024-11-23T19:38:09,304 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:38:09,304 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T19:38:09,304 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T19:38:09,304 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:38:09,304 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T19:38:09,304 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T19:38:09,304 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T19:38:09,305 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43033 2024-11-23T19:38:09,306 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43033 connecting to ZooKeeper ensemble=127.0.0.1:50342 2024-11-23T19:38:09,306 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:38:09,308 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:38:09,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:430330x0, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T19:38:09,321 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:430330x0, quorum=127.0.0.1:50342, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:38:09,321 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43033-0x101693414690001 connected 2024-11-23T19:38:09,322 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T19:38:09,322 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T19:38:09,323 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T19:38:09,325 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T19:38:09,325 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43033 2024-11-23T19:38:09,325 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43033 2024-11-23T19:38:09,326 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43033 2024-11-23T19:38:09,326 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43033 2024-11-23T19:38:09,326 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43033 2024-11-23T19:38:09,342 DEBUG [M:0;387b213c044a:40023 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;387b213c044a:40023 2024-11-23T19:38:09,342 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/387b213c044a,40023,1732390688975 2024-11-23T19:38:09,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:38:09,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:38:09,353 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/387b213c044a,40023,1732390688975 2024-11-23T19:38:09,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T19:38:09,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:38:09,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:38:09,363 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T19:38:09,364 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/387b213c044a,40023,1732390688975 from backup master directory 2024-11-23T19:38:09,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:38:09,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/387b213c044a,40023,1732390688975 2024-11-23T19:38:09,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:38:09,373 WARN [master/387b213c044a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-23T19:38:09,373 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=387b213c044a,40023,1732390688975 2024-11-23T19:38:09,377 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/hbase.id] with ID: d58831f1-8a76-4bbd-96fb-2d94a24f30ae 2024-11-23T19:38:09,377 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/.tmp/hbase.id 2024-11-23T19:38:09,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741826_1002 (size=42) 2024-11-23T19:38:09,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741826_1002 (size=42) 2024-11-23T19:38:09,383 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/.tmp/hbase.id]:[hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/hbase.id] 2024-11-23T19:38:09,394 INFO [master/387b213c044a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:38:09,394 INFO [master/387b213c044a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-23T19:38:09,396 INFO [master/387b213c044a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
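
[editor's note] The two FSUtils lines above record the usual HDFS pattern for publishing a small metadata file: write it under a .tmp location first, then rename it into place, since the rename is atomic while a partially written file is not. A minimal sketch with the Hadoop FileSystem API; the paths and the generated id are illustrative, not the ones from this run:

import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/example/.tmp/hbase.id");   // temporary location
    Path dst = new Path("/example/hbase.id");        // final location

    // Write the id to the temporary file first.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }
    // The rename publishes the file; readers never observe a half-written id.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}
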
2024-11-23T19:38:09,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:38:09,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:38:09,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741827_1003 (size=196) 2024-11-23T19:38:09,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741827_1003 (size=196) 2024-11-23T19:38:09,418 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T19:38:09,419 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-23T19:38:09,419 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:38:09,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741828_1004 (size=1189) 2024-11-23T19:38:09,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741828_1004 (size=1189) 2024-11-23T19:38:09,427 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store 2024-11-23T19:38:09,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741829_1005 (size=34) 2024-11-23T19:38:09,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741829_1005 (size=34) 2024-11-23T19:38:09,435 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:38:09,435 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T19:38:09,435 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:38:09,435 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:38:09,435 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T19:38:09,435 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:38:09,435 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
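
[editor's note] The descriptor dump above (families info, proc, rs and state with their VERSIONS, BLOOMFILTER, IN_MEMORY and BLOCKSIZE settings) is the printed form of an HBase TableDescriptor. A hedged sketch of how such a descriptor is assembled with the public client builders; the table name and the single family shown are illustrative, not the internal master:store table itself:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the 'info' family settings printed in the log: VERSIONS=3,
    // BLOOMFILTER=ROWCOL, IN_MEMORY=true, BLOCKSIZE=8KB, ROW_INDEX_V1 encoding.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .build();

    // "example:store" is a placeholder; the region created above belongs to the
    // master-local 'master:store' table, which is not user-creatable.
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example:store"))
        .setColumnFamily(info)
        .build();
  }
}
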
2024-11-23T19:38:09,435 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732390689435Disabling compacts and flushes for region at 1732390689435Disabling writes for close at 1732390689435Writing region close event to WAL at 1732390689435Closed at 1732390689435 2024-11-23T19:38:09,436 WARN [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/.initializing 2024-11-23T19:38:09,436 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/WALs/387b213c044a,40023,1732390688975 2024-11-23T19:38:09,439 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C40023%2C1732390688975, suffix=, logDir=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/WALs/387b213c044a,40023,1732390688975, archiveDir=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/oldWALs, maxLogs=10 2024-11-23T19:38:09,439 INFO [master/387b213c044a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C40023%2C1732390688975.1732390689439 2024-11-23T19:38:09,444 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/WALs/387b213c044a,40023,1732390688975/387b213c044a%2C40023%2C1732390688975.1732390689439 2024-11-23T19:38:09,445 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44299:44299),(127.0.0.1/127.0.0.1:40895:40895)] 2024-11-23T19:38:09,447 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T19:38:09,447 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:38:09,447 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:38:09,447 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:38:09,449 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:38:09,450 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T19:38:09,450 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:38:09,451 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:38:09,451 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:38:09,452 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T19:38:09,452 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:38:09,452 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:38:09,453 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:38:09,454 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T19:38:09,454 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:38:09,455 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:38:09,455 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:38:09,456 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T19:38:09,456 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:38:09,457 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:38:09,457 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:38:09,458 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:38:09,458 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:38:09,460 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:38:09,460 DEBUG [master/387b213c044a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:38:09,461 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T19:38:09,463 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:38:09,466 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T19:38:09,466 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=822433, jitterRate=0.0457787960767746}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T19:38:09,467 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732390689447Initializing all the Stores at 1732390689448 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390689448Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390689448Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390689448Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390689448Cleaning up temporary data from old regions at 1732390689460 (+12 ms)Region opened successfully at 1732390689467 (+7 ms) 2024-11-23T19:38:09,468 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T19:38:09,471 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3007020e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=387b213c044a/172.17.0.3:0 2024-11-23T19:38:09,472 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-23T19:38:09,473 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T19:38:09,473 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T19:38:09,473 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T19:38:09,473 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-23T19:38:09,474 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-23T19:38:09,474 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T19:38:09,476 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T19:38:09,477 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T19:38:09,489 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-23T19:38:09,489 INFO [master/387b213c044a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T19:38:09,490 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T19:38:09,503 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-23T19:38:09,504 INFO [master/387b213c044a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T19:38:09,506 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T19:38:09,514 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-23T19:38:09,516 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T19:38:09,524 DEBUG 
[master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T19:38:09,531 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T19:38:09,542 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T19:38:09,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:09,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:09,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T19:38:09,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T19:38:09,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:38:09,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:38:09,630 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=387b213c044a,40023,1732390688975, sessionid=0x101693414690000, setting cluster-up flag (Was=false) 2024-11-23T19:38:09,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:38:09,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:38:09,703 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing 
all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T19:38:09,706 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=387b213c044a,40023,1732390688975 2024-11-23T19:38:09,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:38:09,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:38:09,756 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T19:38:09,759 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=387b213c044a,40023,1732390688975 2024-11-23T19:38:09,762 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-23T19:38:09,765 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-23T19:38:09,766 INFO [master/387b213c044a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-23T19:38:09,766 INFO [master/387b213c044a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
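
[editor's note] The StochasticLoadBalancer line above prints its effective tuning (maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000, the cost-function list, and a zero multiplier sum because no regions exist yet). Those values are read from configuration; a hedged sketch of overriding them programmatically, where the property names are assumptions from memory and should be verified against the StochasticLoadBalancer of your HBase release:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed keys (verify against your release):
    conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
    return conf;
  }
}
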
2024-11-23T19:38:09,766 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 387b213c044a,40023,1732390688975 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T19:38:09,768 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:38:09,768 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:38:09,768 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:38:09,768 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:38:09,768 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/387b213c044a:0, corePoolSize=10, maxPoolSize=10 2024-11-23T19:38:09,768 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:38:09,768 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/387b213c044a:0, corePoolSize=2, maxPoolSize=2 2024-11-23T19:38:09,768 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:38:09,769 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732390719769 2024-11-23T19:38:09,769 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T19:38:09,769 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T19:38:09,770 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T19:38:09,770 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T19:38:09,770 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T19:38:09,770 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T19:38:09,770 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T19:38:09,770 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:38:09,770 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T19:38:09,770 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-23T19:38:09,770 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T19:38:09,770 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T19:38:09,771 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T19:38:09,771 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T19:38:09,771 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390689771,5,FailOnTimeoutGroup] 2024-11-23T19:38:09,771 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390689771,5,FailOnTimeoutGroup] 2024-11-23T19:38:09,771 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T19:38:09,771 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T19:38:09,771 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:38:09,771 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T19:38:09,771 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
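
[editor's note] The lines above show the master wiring up its periodic cleaner chores and noting that storeFileRefCount-based region reopening is disabled by default. A small hedged sketch of tuning those knobs through a Configuration: the ref-count key is quoted verbatim from the HMaster line above, while the log-cleaner TTL key is an assumption from memory to be verified:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MasterChoreTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Key quoted from the HMaster(1741) line; a value > 0 enables reopening
    // regions whose store files are pinned by too many readers.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
    // Assumed key for the TimeToLiveLogCleaner retention window (verify):
    conf.setLong("hbase.master.logcleaner.ttl", 600_000L);
    return conf;
  }
}
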
2024-11-23T19:38:09,772 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T19:38:09,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741831_1007 (size=1321) 2024-11-23T19:38:09,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741831_1007 (size=1321) 2024-11-23T19:38:09,779 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-23T19:38:09,779 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868 2024-11-23T19:38:09,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741832_1008 (size=32) 2024-11-23T19:38:09,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741832_1008 (size=32) 2024-11-23T19:38:09,785 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:38:09,786 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T19:38:09,787 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T19:38:09,787 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:38:09,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:38:09,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T19:38:09,789 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T19:38:09,789 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:38:09,789 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:38:09,789 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T19:38:09,790 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T19:38:09,790 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:38:09,791 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:38:09,791 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T19:38:09,792 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T19:38:09,792 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:38:09,792 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:38:09,792 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T19:38:09,793 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740 2024-11-23T19:38:09,793 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740 2024-11-23T19:38:09,795 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T19:38:09,795 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T19:38:09,795 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T19:38:09,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T19:38:09,798 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T19:38:09,799 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=793460, jitterRate=0.008936852216720581}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T19:38:09,799 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732390689785Initializing all the Stores at 1732390689786 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390689786Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390689786Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390689786Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE 
=> 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390689786Cleaning up temporary data from old regions at 1732390689795 (+9 ms)Region opened successfully at 1732390689799 (+4 ms) 2024-11-23T19:38:09,799 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T19:38:09,799 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T19:38:09,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T19:38:09,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T19:38:09,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T19:38:09,800 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T19:38:09,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732390689799Disabling compacts and flushes for region at 1732390689799Disabling writes for close at 1732390689800 (+1 ms)Writing region close event to WAL at 1732390689800Closed at 1732390689800 2024-11-23T19:38:09,801 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:38:09,801 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-23T19:38:09,801 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T19:38:09,803 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T19:38:09,804 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T19:38:09,829 INFO [RS:0;387b213c044a:43033 {}] regionserver.HRegionServer(746): ClusterId : d58831f1-8a76-4bbd-96fb-2d94a24f30ae 2024-11-23T19:38:09,829 DEBUG [RS:0;387b213c044a:43033 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T19:38:09,841 DEBUG [RS:0;387b213c044a:43033 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T19:38:09,841 DEBUG [RS:0;387b213c044a:43033 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T19:38:09,852 DEBUG [RS:0;387b213c044a:43033 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T19:38:09,852 DEBUG [RS:0;387b213c044a:43033 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6593ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=387b213c044a/172.17.0.3:0 2024-11-23T19:38:09,862 DEBUG [RS:0;387b213c044a:43033 {}] 
regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;387b213c044a:43033 2024-11-23T19:38:09,862 INFO [RS:0;387b213c044a:43033 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T19:38:09,862 INFO [RS:0;387b213c044a:43033 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T19:38:09,862 DEBUG [RS:0;387b213c044a:43033 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-23T19:38:09,863 INFO [RS:0;387b213c044a:43033 {}] regionserver.HRegionServer(2659): reportForDuty to master=387b213c044a,40023,1732390688975 with port=43033, startcode=1732390689303 2024-11-23T19:38:09,863 DEBUG [RS:0;387b213c044a:43033 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T19:38:09,865 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49095, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T19:38:09,865 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40023 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 387b213c044a,43033,1732390689303 2024-11-23T19:38:09,865 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40023 {}] master.ServerManager(517): Registering regionserver=387b213c044a,43033,1732390689303 2024-11-23T19:38:09,867 DEBUG [RS:0;387b213c044a:43033 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868 2024-11-23T19:38:09,867 DEBUG [RS:0;387b213c044a:43033 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42187 2024-11-23T19:38:09,867 DEBUG [RS:0;387b213c044a:43033 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T19:38:09,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T19:38:09,879 DEBUG [RS:0;387b213c044a:43033 {}] zookeeper.ZKUtil(111): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/387b213c044a,43033,1732390689303 2024-11-23T19:38:09,879 WARN [RS:0;387b213c044a:43033 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
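Annotation: the sequence above (reportForDuty to the master, ServerManager registering the server, the ephemeral /hbase/rs znode being created) is what makes a region server visible to the cluster. A minimal client-side Java sketch, not part of the test itself, that lists the servers registered this way; the class name is hypothetical and it assumes an HBase 2.x/3.x client on the classpath with a reachable cluster configured in hbase-site.xml:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListRegionServersSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Each server that completed reportForDuty shows up as host,port,startcode,
      // e.g. 387b213c044a,43033,1732390689303 in the log above.
      for (ServerName sn : admin.getRegionServers()) {
        System.out.println(sn.getServerName());
      }
    }
  }
}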
2024-11-23T19:38:09,879 INFO [RS:0;387b213c044a:43033 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:38:09,879 DEBUG [RS:0;387b213c044a:43033 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/WALs/387b213c044a,43033,1732390689303 2024-11-23T19:38:09,879 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [387b213c044a,43033,1732390689303] 2024-11-23T19:38:09,882 INFO [RS:0;387b213c044a:43033 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T19:38:09,884 INFO [RS:0;387b213c044a:43033 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T19:38:09,884 INFO [RS:0;387b213c044a:43033 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T19:38:09,884 INFO [RS:0;387b213c044a:43033 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:38:09,885 INFO [RS:0;387b213c044a:43033 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T19:38:09,886 INFO [RS:0;387b213c044a:43033 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T19:38:09,886 INFO [RS:0;387b213c044a:43033 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T19:38:09,886 DEBUG [RS:0;387b213c044a:43033 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:38:09,886 DEBUG [RS:0;387b213c044a:43033 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:38:09,886 DEBUG [RS:0;387b213c044a:43033 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:38:09,886 DEBUG [RS:0;387b213c044a:43033 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:38:09,886 DEBUG [RS:0;387b213c044a:43033 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:38:09,886 DEBUG [RS:0;387b213c044a:43033 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/387b213c044a:0, corePoolSize=2, maxPoolSize=2 2024-11-23T19:38:09,886 DEBUG [RS:0;387b213c044a:43033 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:38:09,886 DEBUG [RS:0;387b213c044a:43033 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:38:09,886 DEBUG [RS:0;387b213c044a:43033 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/387b213c044a:0, corePoolSize=1, 
maxPoolSize=1 2024-11-23T19:38:09,886 DEBUG [RS:0;387b213c044a:43033 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:38:09,887 DEBUG [RS:0;387b213c044a:43033 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:38:09,887 DEBUG [RS:0;387b213c044a:43033 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:38:09,887 DEBUG [RS:0;387b213c044a:43033 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/387b213c044a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T19:38:09,887 DEBUG [RS:0;387b213c044a:43033 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T19:38:09,887 INFO [RS:0;387b213c044a:43033 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T19:38:09,887 INFO [RS:0;387b213c044a:43033 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T19:38:09,887 INFO [RS:0;387b213c044a:43033 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:38:09,887 INFO [RS:0;387b213c044a:43033 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T19:38:09,887 INFO [RS:0;387b213c044a:43033 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T19:38:09,887 INFO [RS:0;387b213c044a:43033 {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,43033,1732390689303-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T19:38:09,901 INFO [RS:0;387b213c044a:43033 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T19:38:09,901 INFO [RS:0;387b213c044a:43033 {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,43033,1732390689303-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:38:09,901 INFO [RS:0;387b213c044a:43033 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:38:09,901 INFO [RS:0;387b213c044a:43033 {}] regionserver.Replication(171): 387b213c044a,43033,1732390689303 started 2024-11-23T19:38:09,914 INFO [RS:0;387b213c044a:43033 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
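Annotation: the ChoreService lines above ("Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled.") come from periodic tasks being scheduled on the region server's chore pool. ChoreService and ScheduledChore are internal (IA.Private) classes, so this is only a sketch of the pattern with hypothetical names, assuming the signatures found in recent HBase versions:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) {
    ChoreService service = new ChoreService("demo"); // prefix used for the pool's thread names
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // Period is in milliseconds, matching the "period=..., unit=MILLISECONDS" lines above.
    ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };
    // Scheduling is what produces the "Chore ScheduledChore name=... is enabled." log line.
    service.scheduleChore(chore);
    // ... later, on shutdown: service.shutdown();
  }
}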
2024-11-23T19:38:09,914 INFO [RS:0;387b213c044a:43033 {}] regionserver.HRegionServer(1482): Serving as 387b213c044a,43033,1732390689303, RpcServer on 387b213c044a/172.17.0.3:43033, sessionid=0x101693414690001 2024-11-23T19:38:09,914 DEBUG [RS:0;387b213c044a:43033 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T19:38:09,914 DEBUG [RS:0;387b213c044a:43033 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 387b213c044a,43033,1732390689303 2024-11-23T19:38:09,914 DEBUG [RS:0;387b213c044a:43033 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '387b213c044a,43033,1732390689303' 2024-11-23T19:38:09,914 DEBUG [RS:0;387b213c044a:43033 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T19:38:09,915 DEBUG [RS:0;387b213c044a:43033 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T19:38:09,915 DEBUG [RS:0;387b213c044a:43033 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T19:38:09,915 DEBUG [RS:0;387b213c044a:43033 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T19:38:09,915 DEBUG [RS:0;387b213c044a:43033 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 387b213c044a,43033,1732390689303 2024-11-23T19:38:09,915 DEBUG [RS:0;387b213c044a:43033 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '387b213c044a,43033,1732390689303' 2024-11-23T19:38:09,915 DEBUG [RS:0;387b213c044a:43033 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T19:38:09,915 DEBUG [RS:0;387b213c044a:43033 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T19:38:09,916 DEBUG [RS:0;387b213c044a:43033 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T19:38:09,916 INFO [RS:0;387b213c044a:43033 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T19:38:09,916 INFO [RS:0;387b213c044a:43033 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T19:38:09,954 WARN [387b213c044a:40023 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
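Annotation: "Quota support disabled" above is the default behaviour of RegionServerRpcQuotaManager and RegionServerSpaceQuotaManager. A small sketch of the configuration switch believed to control it (hbase.quota.enabled, normally set in hbase-site.xml on the master and every region server rather than in code); the class name is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class QuotaConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flipping this key to true makes the quota managers start instead of logging
    // "Quota support disabled"; it must be consistent across the whole cluster.
    conf.setBoolean("hbase.quota.enabled", true);
    System.out.println(conf.getBoolean("hbase.quota.enabled", false));
  }
}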
2024-11-23T19:38:10,020 INFO [RS:0;387b213c044a:43033 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C43033%2C1732390689303, suffix=, logDir=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/WALs/387b213c044a,43033,1732390689303, archiveDir=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/oldWALs, maxLogs=32 2024-11-23T19:38:10,021 INFO [RS:0;387b213c044a:43033 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C43033%2C1732390689303.1732390690021 2024-11-23T19:38:10,028 INFO [RS:0;387b213c044a:43033 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/WALs/387b213c044a,43033,1732390689303/387b213c044a%2C43033%2C1732390689303.1732390690021 2024-11-23T19:38:10,029 DEBUG [RS:0;387b213c044a:43033 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40895:40895),(127.0.0.1/127.0.0.1:44299:44299)] 2024-11-23T19:38:10,052 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T19:38:10,053 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:10,054 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:10,054 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:10,054 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:10,054 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:10,084 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:10,084 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:10,084 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:10,085 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:10,085 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:10,085 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:10,086 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:10,086 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:10,086 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:10,089 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:10,204 DEBUG [387b213c044a:40023 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-23T19:38:10,206 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=387b213c044a,43033,1732390689303 2024-11-23T19:38:10,209 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 387b213c044a,43033,1732390689303, state=OPENING 2024-11-23T19:38:10,262 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T19:38:10,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:38:10,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:38:10,274 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T19:38:10,274 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:38:10,274 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:38:10,274 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=387b213c044a,43033,1732390689303}] 2024-11-23T19:38:10,431 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T19:38:10,436 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56255, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T19:38:10,443 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T19:38:10,443 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:38:10,447 INFO 
[RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C43033%2C1732390689303.meta, suffix=.meta, logDir=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/WALs/387b213c044a,43033,1732390689303, archiveDir=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/oldWALs, maxLogs=32 2024-11-23T19:38:10,447 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C43033%2C1732390689303.meta.1732390690447.meta 2024-11-23T19:38:10,454 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/WALs/387b213c044a,43033,1732390689303/387b213c044a%2C43033%2C1732390689303.meta.1732390690447.meta 2024-11-23T19:38:10,457 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44299:44299),(127.0.0.1/127.0.0.1:40895:40895)] 2024-11-23T19:38:10,460 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T19:38:10,461 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T19:38:10,461 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T19:38:10,461 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
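Annotation: the "Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully." line above shows a coprocessor being picked up from the table descriptor at region open time. A minimal sketch of how a coprocessor ends up in a descriptor using the public TableDescriptorBuilder API; the "demo" table, "info" family and class name here are placeholders, not taken from the test:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorInHtdSketch {
  public static void main(String[] args) throws Exception {
    // Attach a coprocessor class to the table descriptor; when a region of this table
    // is opened, RegionCoprocessorHost loads it and logs a line like the one above.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
    System.out.println(td.getCoprocessorDescriptors());
  }
}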
2024-11-23T19:38:10,461 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T19:38:10,461 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:38:10,461 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T19:38:10,461 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T19:38:10,463 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T19:38:10,463 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T19:38:10,463 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:38:10,464 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:38:10,464 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T19:38:10,465 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T19:38:10,465 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:38:10,465 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:38:10,465 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T19:38:10,466 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T19:38:10,466 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:38:10,466 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:38:10,466 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T19:38:10,467 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T19:38:10,467 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:38:10,467 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
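Annotation: the CompactionConfiguration(183) dumps above (minCompactSize, minFilesToCompact, maxFilesToCompact, ratio, off-peak ratio, major period, ...) are derived from site configuration. A sketch of the configuration keys believed to back those values; the mapping is approximate and the class name is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
    conf.setLong("hbase.hstore.compaction.min.size", 134217728L);  // minCompactSize, 128 MB
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);     // major period, 7 days in ms
    System.out.println(conf.get("hbase.hstore.compaction.ratio"));
  }
}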
2024-11-23T19:38:10,467 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T19:38:10,468 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740 2024-11-23T19:38:10,469 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740 2024-11-23T19:38:10,470 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T19:38:10,470 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T19:38:10,471 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T19:38:10,472 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T19:38:10,472 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778192, jitterRate=-0.010478600859642029}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T19:38:10,472 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T19:38:10,473 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732390690461Writing region info on filesystem at 1732390690461Initializing all the Stores at 1732390690462 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390690462Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390690462Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390690462Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390690462Cleaning up temporary data from old regions at 1732390690470 (+8 ms)Running coprocessor post-open hooks at 1732390690472 (+2 ms)Region opened successfully at 1732390690473 (+1 ms) 2024-11-23T19:38:10,474 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732390690431 2024-11-23T19:38:10,476 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T19:38:10,476 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T19:38:10,477 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=387b213c044a,43033,1732390689303 2024-11-23T19:38:10,478 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 387b213c044a,43033,1732390689303, state=OPEN 2024-11-23T19:38:10,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T19:38:10,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T19:38:10,518 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=387b213c044a,43033,1732390689303 2024-11-23T19:38:10,518 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:38:10,518 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:38:10,521 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T19:38:10,521 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=387b213c044a,43033,1732390689303 in 244 msec 2024-11-23T19:38:10,523 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T19:38:10,523 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 720 msec 2024-11-23T19:38:10,524 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:38:10,524 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-23T19:38:10,525 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T19:38:10,525 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=387b213c044a,43033,1732390689303, seqNum=-1] 2024-11-23T19:38:10,526 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T19:38:10,527 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36219, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T19:38:10,532 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 768 msec 2024-11-23T19:38:10,532 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732390690532, completionTime=-1 2024-11-23T19:38:10,532 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-23T19:38:10,532 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-23T19:38:10,534 INFO [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-23T19:38:10,534 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732390750534 2024-11-23T19:38:10,534 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732390810534 2024-11-23T19:38:10,534 INFO [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-23T19:38:10,534 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,40023,1732390688975-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:38:10,535 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,40023,1732390688975-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:38:10,535 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,40023,1732390688975-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:38:10,535 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-387b213c044a:40023, period=300000, unit=MILLISECONDS is enabled. 
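Annotation: InitMetaProcedure above finishes by creating the built-in 'default' and 'hbase' namespaces. A minimal client-side sketch, with a hypothetical class name, that verifies they exist once the master reports initialization complete (and shows how a user namespace would be created through the same Admin API):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // After INIT_META_CREATE_NAMESPACES, "default" and "hbase" should both be listed.
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
      // A user namespace would be created the same way the procedure creates the built-ins:
      // admin.createNamespace(NamespaceDescriptor.create("my_ns").build());
    }
  }
}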
2024-11-23T19:38:10,535 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T19:38:10,535 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-23T19:38:10,537 DEBUG [master/387b213c044a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T19:38:10,539 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.165sec 2024-11-23T19:38:10,539 INFO [master/387b213c044a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T19:38:10,539 INFO [master/387b213c044a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T19:38:10,539 INFO [master/387b213c044a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T19:38:10,539 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-23T19:38:10,539 INFO [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T19:38:10,539 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,40023,1732390688975-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T19:38:10,539 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,40023,1732390688975-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T19:38:10,542 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-23T19:38:10,542 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T19:38:10,542 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,40023,1732390688975-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:38:10,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:10,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:10,630 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2494db76, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:38:10,630 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 387b213c044a,40023,-1 for getting cluster id 2024-11-23T19:38:10,630 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T19:38:10,633 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd58831f1-8a76-4bbd-96fb-2d94a24f30ae' 2024-11-23T19:38:10,634 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T19:38:10,634 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d58831f1-8a76-4bbd-96fb-2d94a24f30ae" 2024-11-23T19:38:10,634 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6065c38c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:38:10,635 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [387b213c044a,40023,-1] 2024-11-23T19:38:10,635 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T19:38:10,636 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:38:10,638 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35914, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T19:38:10,639 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27ff9b03, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:38:10,639 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T19:38:10,640 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=387b213c044a,43033,1732390689303, seqNum=-1] 2024-11-23T19:38:10,641 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T19:38:10,642 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51416, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T19:38:10,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=387b213c044a,40023,1732390688975 2024-11-23T19:38:10,644 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:38:10,647 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-23T19:38:10,648 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T19:38:10,649 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 387b213c044a,40023,1732390688975 2024-11-23T19:38:10,649 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3a98173a 2024-11-23T19:38:10,649 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T19:38:10,651 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35928, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T19:38:10,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-23T19:38:10,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
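Annotation: the two TableDescriptorChecker warnings above fire because the table being created sets MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) far below the sanity thresholds; log-rolling tests do this deliberately to force frequent flushes and splits. A sketch of a descriptor carrying those values via the public builder API; only the two numeric values are taken from the log, the rest is illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallFlushSizeDescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setMaxFileSize(786432)      // ~768 KB region max file size; triggers the MAX_FILESIZE warning
        .setMemStoreFlushSize(8192)  // 8 KB flush size; triggers the MEMSTORE_FLUSHSIZE warning
        .build();
    System.out.println(td);
  }
}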
2024-11-23T19:38:10,651 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T19:38:10,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-23T19:38:10,654 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T19:38:10,655 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:38:10,655 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-23T19:38:10,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T19:38:10,656 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T19:38:10,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741835_1011 (size=405) 2024-11-23T19:38:10,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741835_1011 (size=405) 2024-11-23T19:38:10,666 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9c6e5b318bdd60aa7170518dd086ff88, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868 2024-11-23T19:38:10,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741836_1012 (size=88) 2024-11-23T19:38:10,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39783 is added to blk_1073741836_1012 (size=88) 2024-11-23T19:38:10,674 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:38:10,674 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 9c6e5b318bdd60aa7170518dd086ff88, disabling compactions & flushes 2024-11-23T19:38:10,674 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88. 2024-11-23T19:38:10,674 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88. 2024-11-23T19:38:10,674 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88. after waiting 0 ms 2024-11-23T19:38:10,674 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88. 2024-11-23T19:38:10,674 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88. 2024-11-23T19:38:10,674 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9c6e5b318bdd60aa7170518dd086ff88: Waiting for close lock at 1732390690674Disabling compacts and flushes for region at 1732390690674Disabling writes for close at 1732390690674Writing region close event to WAL at 1732390690674Closed at 1732390690674 2024-11-23T19:38:10,675 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T19:38:10,676 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732390690675"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732390690675"}]},"ts":"1732390690675"} 2024-11-23T19:38:10,678 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
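Annotation: the "Client=jenkins//172.17.0.3 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', ..." request above is what kicks off CreateTableProcedure pid=4 (pre-operation, write FS layout, add to meta, assign regions). A minimal sketch of the equivalent client call, with a hypothetical class name, assuming a reachable cluster; the synchronous createTable returns once the procedure and its assign subprocedures complete:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Issues the same master RPC that appears above as HMaster$4(2454) "create ...".
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .build());
    }
  }
}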
2024-11-23T19:38:10,679 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T19:38:10,680 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732390690679"}]},"ts":"1732390690679"} 2024-11-23T19:38:10,682 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-23T19:38:10,683 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=9c6e5b318bdd60aa7170518dd086ff88, ASSIGN}] 2024-11-23T19:38:10,684 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=9c6e5b318bdd60aa7170518dd086ff88, ASSIGN 2024-11-23T19:38:10,685 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=9c6e5b318bdd60aa7170518dd086ff88, ASSIGN; state=OFFLINE, location=387b213c044a,43033,1732390689303; forceNewPlan=false, retain=false 2024-11-23T19:38:10,837 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9c6e5b318bdd60aa7170518dd086ff88, regionState=OPENING, regionLocation=387b213c044a,43033,1732390689303 2024-11-23T19:38:10,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=9c6e5b318bdd60aa7170518dd086ff88, ASSIGN because future has completed 2024-11-23T19:38:10,845 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9c6e5b318bdd60aa7170518dd086ff88, server=387b213c044a,43033,1732390689303}] 2024-11-23T19:38:11,004 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88. 
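[Editor's note] The assignment entries above show TransitRegionStateProcedure (pid=5) selecting 387b213c044a,43033 and spawning an OpenRegionProcedure (pid=6). After the open completes (see the "Finished ... ASSIGN" entry further below), the assignment is readable from hbase:meta; the following is a minimal, purely illustrative sketch of reading it back with the public RegionLocator API (class name and connection settings are assumed).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionAssignment {
  public static void main(String[] args) throws Exception {
    TableName table =
        TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(table)) {
      // Each location pairs an encoded region name (e.g. 9c6e5b318bdd60aa7170518dd086ff88)
      // with the region server it was assigned to (e.g. 387b213c044a,43033,...).
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}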
2024-11-23T19:38:11,004 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 9c6e5b318bdd60aa7170518dd086ff88, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88.', STARTKEY => '', ENDKEY => ''} 2024-11-23T19:38:11,005 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 9c6e5b318bdd60aa7170518dd086ff88 2024-11-23T19:38:11,005 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:38:11,005 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 9c6e5b318bdd60aa7170518dd086ff88 2024-11-23T19:38:11,005 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 9c6e5b318bdd60aa7170518dd086ff88 2024-11-23T19:38:11,007 INFO [StoreOpener-9c6e5b318bdd60aa7170518dd086ff88-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9c6e5b318bdd60aa7170518dd086ff88 2024-11-23T19:38:11,010 INFO [StoreOpener-9c6e5b318bdd60aa7170518dd086ff88-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9c6e5b318bdd60aa7170518dd086ff88 columnFamilyName info 2024-11-23T19:38:11,010 DEBUG [StoreOpener-9c6e5b318bdd60aa7170518dd086ff88-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:38:11,011 INFO [StoreOpener-9c6e5b318bdd60aa7170518dd086ff88-1 {}] regionserver.HStore(327): Store=9c6e5b318bdd60aa7170518dd086ff88/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:38:11,011 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 9c6e5b318bdd60aa7170518dd086ff88 2024-11-23T19:38:11,013 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88 2024-11-23T19:38:11,013 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88 2024-11-23T19:38:11,014 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 9c6e5b318bdd60aa7170518dd086ff88 2024-11-23T19:38:11,014 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 9c6e5b318bdd60aa7170518dd086ff88 2024-11-23T19:38:11,016 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 9c6e5b318bdd60aa7170518dd086ff88 2024-11-23T19:38:11,018 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T19:38:11,018 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 9c6e5b318bdd60aa7170518dd086ff88; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=828417, jitterRate=0.05338774621486664}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T19:38:11,018 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9c6e5b318bdd60aa7170518dd086ff88 2024-11-23T19:38:11,019 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 9c6e5b318bdd60aa7170518dd086ff88: Running coprocessor pre-open hook at 1732390691005Writing region info on filesystem at 1732390691005Initializing all the Stores at 1732390691007 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390691007Cleaning up temporary data from old regions at 1732390691014 (+7 ms)Running coprocessor post-open hooks at 1732390691018 (+4 ms)Region opened successfully at 1732390691019 (+1 ms) 2024-11-23T19:38:11,020 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88., pid=6, masterSystemTime=1732390690998 2024-11-23T19:38:11,022 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88. 2024-11-23T19:38:11,022 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88. 2024-11-23T19:38:11,023 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9c6e5b318bdd60aa7170518dd086ff88, regionState=OPEN, openSeqNum=2, regionLocation=387b213c044a,43033,1732390689303 2024-11-23T19:38:11,025 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9c6e5b318bdd60aa7170518dd086ff88, server=387b213c044a,43033,1732390689303 because future has completed 2024-11-23T19:38:11,029 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-23T19:38:11,029 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 9c6e5b318bdd60aa7170518dd086ff88, server=387b213c044a,43033,1732390689303 in 182 msec 2024-11-23T19:38:11,032 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-23T19:38:11,032 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=9c6e5b318bdd60aa7170518dd086ff88, ASSIGN in 346 msec 2024-11-23T19:38:11,033 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T19:38:11,033 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732390691033"}]},"ts":"1732390691033"} 2024-11-23T19:38:11,035 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-23T19:38:11,036 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T19:38:11,038 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 384 msec 2024-11-23T19:38:11,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:11,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:12,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:12,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:13,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:13,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:14,535 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T19:38:14,535 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-23T19:38:14,536 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T19:38:14,537 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-23T19:38:14,537 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-23T19:38:14,537 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-23T19:38:14,538 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-23T19:38:14,538 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-23T19:38:14,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:14,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:15,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:15,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:15,967 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T19:38:15,970 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:15,970 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:15,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:15,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:15,972 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:15,990 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:15,991 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:15,991 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:15,991 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:15,991 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:15,991 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:15,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:15,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:15,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:15,997 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:38:16,002 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-23T19:38:16,002 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-23T19:38:16,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:16,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:17,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:17,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:18,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:18,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:19,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:19,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:20,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:20,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-23T19:38:20,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-23T19:38:20,710 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-23T19:38:20,711 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-11-23T19:38:20,722 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-23T19:38:20,722 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88.
2024-11-23T19:38:20,725 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88., hostname=387b213c044a,43033,1732390689303, seqNum=2]
2024-11-23T19:38:20,732 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-23T19:38:20,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-23T19:38:20,738 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-23T19:38:20,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-23T19:38:20,739 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-23T19:38:20,740 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-23T19:38:20,904 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43033 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-23T19:38:20,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88.
2024-11-23T19:38:20,905 INFO [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 9c6e5b318bdd60aa7170518dd086ff88 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-23T19:38:20,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/.tmp/info/47511e94e3ac413a858b4f9d66f4ac9c is 1080, key is row0001/info:/1732390700726/Put/seqid=0
2024-11-23T19:38:20,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741837_1013 (size=6033)
2024-11-23T19:38:20,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741837_1013 (size=6033)
2024-11-23T19:38:20,932 INFO [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/.tmp/info/47511e94e3ac413a858b4f9d66f4ac9c
2024-11-23T19:38:20,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/.tmp/info/47511e94e3ac413a858b4f9d66f4ac9c as hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/47511e94e3ac413a858b4f9d66f4ac9c
2024-11-23T19:38:20,943 INFO [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/47511e94e3ac413a858b4f9d66f4ac9c, entries=1, sequenceid=5, filesize=5.9 K
2024-11-23T19:38:20,944 INFO [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 9c6e5b318bdd60aa7170518dd086ff88 in 39ms, sequenceid=5, compaction requested=false
2024-11-23T19:38:20,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 9c6e5b318bdd60aa7170518dd086ff88:
2024-11-23T19:38:20,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88.
2024-11-23T19:38:20,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-23T19:38:20,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-23T19:38:20,951 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-23T19:38:20,951 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 208 msec 2024-11-23T19:38:20,953 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 217 msec 2024-11-23T19:38:21,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:21,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:22,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:22,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:23,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:23,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:24,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:24,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:25,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:25,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:26,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:26,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:27,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:27,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:28,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:28,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:29,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:29,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:30,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:30,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:30,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta after 68069ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T19:38:30,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 after 68080ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
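All of the repeated WARNs above come from the same place: the WAL close path keeps retrying lease recovery against a DFSClient that has already been shut down, so every probe fails with "Filesystem closed" and the loop tries again roughly once per second (the "attempt=2 ... after 68069ms" lines are that loop reporting progress). The snippet below is a minimal, hypothetical sketch of that kind of retry loop written against the public HDFS client API; the NameNode URI, file path, timeout and back-off are placeholders for illustration, not values from this log, and this is not the actual RecoverLeaseFSUtils implementation.

```java
// Hedged sketch of a lease-recovery retry loop (placeholder URI/path/timeout, not from this log).
import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryRetry {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode URI; substitute the cluster under test.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    Path wal = new Path("/example/wal-file"); // placeholder WAL path

    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalStateException("Lease recovery only applies to HDFS");
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    long start = System.currentTimeMillis();
    long timeoutMs = 15 * 60 * 1000L; // arbitrary cap for the sketch
    int attempt = 0;
    boolean recovered = false;

    while (!recovered && System.currentTimeMillis() - start < timeoutMs) {
      attempt++;
      try {
        // recoverLease() returns true once the NameNode has closed the file.
        recovered = dfs.recoverLease(wal);
        if (!recovered) {
          // isFileClosed() is a cheaper poll between recoverLease() calls.
          recovered = dfs.isFileClosed(wal);
        }
      } catch (IOException e) {
        // A closed DFSClient surfaces here as "Filesystem closed"; logging and retrying
        // is exactly what produces the repeated WARNs seen in the log above.
        System.err.println("attempt=" + attempt + " failed: " + e.getMessage());
      }
      if (!recovered) {
        Thread.sleep(1000L); // back off one second between attempts
      }
    }
    System.out.println("recovered=" + recovered + " after " + attempt + " attempt(s)");
  }
}
```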
2024-11-23T19:38:30,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T19:38:30,800 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-23T19:38:30,808 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-23T19:38:30,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-23T19:38:30,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-23T19:38:30,812 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-23T19:38:30,813 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T19:38:30,813 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T19:38:30,968 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43033 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-23T19:38:30,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88. 
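The entries above trace a client-initiated table flush end to end: the master stores a FlushTableProcedure (pid=9), which walks FLUSH_TABLE_PREPARE and FLUSH_TABLE_FLUSH_REGIONS and fans out a FlushRegionProcedure (pid=10) that a region server executes via FlushRegionCallable. The sketch below shows, under stated assumptions, how such a flush is typically issued from the client side with the HBase Admin API; the connection configuration is a placeholder and this is not the test's own code.

```java
// Hedged sketch: issuing a table flush like the one logged at 19:38:30,808.
// Connection settings are assumptions for illustration; only the table name is taken from the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table =
          TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
      // Admin.flush() asks the master to flush the table; the
      // "Operation: FLUSH, Table Name: ... completed" lines in the log above are the client
      // observing the corresponding master-side procedure finish.
      admin.flush(table);
    }
  }
}
```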
2024-11-23T19:38:30,970 INFO [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 9c6e5b318bdd60aa7170518dd086ff88 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-23T19:38:30,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/.tmp/info/1bef32acdc874f3ea58b3335ee2fc8ce is 1080, key is row0002/info:/1732390710803/Put/seqid=0 2024-11-23T19:38:30,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741838_1014 (size=6033) 2024-11-23T19:38:30,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741838_1014 (size=6033) 2024-11-23T19:38:30,989 INFO [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/.tmp/info/1bef32acdc874f3ea58b3335ee2fc8ce 2024-11-23T19:38:30,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/.tmp/info/1bef32acdc874f3ea58b3335ee2fc8ce as hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/1bef32acdc874f3ea58b3335ee2fc8ce 2024-11-23T19:38:31,003 INFO [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/1bef32acdc874f3ea58b3335ee2fc8ce, entries=1, sequenceid=9, filesize=5.9 K 2024-11-23T19:38:31,004 INFO [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 9c6e5b318bdd60aa7170518dd086ff88 in 34ms, sequenceid=9, compaction requested=false 2024-11-23T19:38:31,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 9c6e5b318bdd60aa7170518dd086ff88: 2024-11-23T19:38:31,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88. 
2024-11-23T19:38:31,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-23T19:38:31,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-23T19:38:31,009 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-23T19:38:31,009 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 193 msec 2024-11-23T19:38:31,011 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 201 msec 2024-11-23T19:38:31,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:31,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:32,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:32,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:33,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:33,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:34,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:34,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:35,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:35,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:36,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:36,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:37,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:37,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:38,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:38,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:38,932 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T19:38:39,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:39,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:40,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:40,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:40,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-23T19:38:40,909 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-23T19:38:40,912 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C43033%2C1732390689303.1732390720912 2024-11-23T19:38:40,925 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:38:40,925 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:38:40,925 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:38:40,925 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:38:40,925 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:38:40,925 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/WALs/387b213c044a,43033,1732390689303/387b213c044a%2C43033%2C1732390689303.1732390690021 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/WALs/387b213c044a,43033,1732390689303/387b213c044a%2C43033%2C1732390689303.1732390720912 2024-11-23T19:38:40,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741833_1009 (size=5546) 2024-11-23T19:38:40,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741833_1009 (size=5546) 2024-11-23T19:38:40,932 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40895:40895),(127.0.0.1/127.0.0.1:44299:44299)] 2024-11-23T19:38:40,933 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-23T19:38:40,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-23T19:38:40,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-23T19:38:40,936 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-23T19:38:40,937 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T19:38:40,937 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T19:38:41,090 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43033 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-23T19:38:41,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88. 2024-11-23T19:38:41,091 INFO [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 9c6e5b318bdd60aa7170518dd086ff88 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-23T19:38:41,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/.tmp/info/17982f3c3bf14efc90dfb4cebbb6731a is 1080, key is row0003/info:/1732390720910/Put/seqid=0 2024-11-23T19:38:41,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741840_1016 (size=6033) 2024-11-23T19:38:41,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741840_1016 (size=6033) 2024-11-23T19:38:41,109 INFO [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/.tmp/info/17982f3c3bf14efc90dfb4cebbb6731a 2024-11-23T19:38:41,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/.tmp/info/17982f3c3bf14efc90dfb4cebbb6731a as hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/17982f3c3bf14efc90dfb4cebbb6731a 2024-11-23T19:38:41,128 INFO [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/17982f3c3bf14efc90dfb4cebbb6731a, entries=1, sequenceid=13, filesize=5.9 K 2024-11-23T19:38:41,129 INFO [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 9c6e5b318bdd60aa7170518dd086ff88 in 38ms, sequenceid=13, compaction requested=true 2024-11-23T19:38:41,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 
9c6e5b318bdd60aa7170518dd086ff88: 2024-11-23T19:38:41,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88. 2024-11-23T19:38:41,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-23T19:38:41,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-23T19:38:41,135 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-23T19:38:41,136 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 195 msec 2024-11-23T19:38:41,140 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 204 msec 2024-11-23T19:38:41,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:41,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:42,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:42,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:43,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:43,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:44,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:44,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:45,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:45,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:46,587 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:46,587 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:47,588 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:47,588 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:48,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:48,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:49,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:49,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:50,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:50,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:50,829 INFO [master/387b213c044a:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-23T19:38:50,829 INFO [master/387b213c044a:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-23T19:38:51,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-23T19:38:51,008 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-23T19:38:51,009 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-23T19:38:51,010 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-23T19:38:51,010 DEBUG [Time-limited test {}] regionserver.HStore(1541): 9c6e5b318bdd60aa7170518dd086ff88/info is initiating minor compaction (all files)
2024-11-23T19:38:51,010 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-23T19:38:51,010 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-23T19:38:51,010 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 9c6e5b318bdd60aa7170518dd086ff88/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88.
2024-11-23T19:38:51,010 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/47511e94e3ac413a858b4f9d66f4ac9c, hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/1bef32acdc874f3ea58b3335ee2fc8ce, hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/17982f3c3bf14efc90dfb4cebbb6731a] into tmpdir=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/.tmp, totalSize=17.7 K
2024-11-23T19:38:51,011 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 47511e94e3ac413a858b4f9d66f4ac9c, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732390700726
2024-11-23T19:38:51,011 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 1bef32acdc874f3ea58b3335ee2fc8ce, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732390710803
2024-11-23T19:38:51,011 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 17982f3c3bf14efc90dfb4cebbb6731a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732390720910
2024-11-23T19:38:51,025 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 9c6e5b318bdd60aa7170518dd086ff88#info#compaction#46 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-23T19:38:51,026 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/.tmp/info/92ffcde260884178875322137f341a18 is 1080, key is row0001/info:/1732390700726/Put/seqid=0
2024-11-23T19:38:51,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741841_1017 (size=8296)
2024-11-23T19:38:51,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741841_1017 (size=8296)
2024-11-23T19:38:51,442 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/.tmp/info/92ffcde260884178875322137f341a18 as hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/92ffcde260884178875322137f341a18
2024-11-23T19:38:51,451 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9c6e5b318bdd60aa7170518dd086ff88/info of 9c6e5b318bdd60aa7170518dd086ff88 into 92ffcde260884178875322137f341a18(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-23T19:38:51,451 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 9c6e5b318bdd60aa7170518dd086ff88:
2024-11-23T19:38:51,454 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C43033%2C1732390689303.1732390731454
2024-11-23T19:38:51,463 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T19:38:51,463 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T19:38:51,463 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T19:38:51,463 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T19:38:51,463 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T19:38:51,463 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/WALs/387b213c044a,43033,1732390689303/387b213c044a%2C43033%2C1732390689303.1732390720912 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/WALs/387b213c044a,43033,1732390689303/387b213c044a%2C43033%2C1732390689303.1732390731454
2024-11-23T19:38:51,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741839_1015 (size=2520)
2024-11-23T19:38:51,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741839_1015 (size=2520)
2024-11-23T19:38:51,468 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40895:40895),(127.0.0.1/127.0.0.1:44299:44299)]
2024-11-23T19:38:51,472 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving
hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/WALs/387b213c044a,43033,1732390689303/387b213c044a%2C43033%2C1732390689303.1732390690021 to hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/oldWALs/387b213c044a%2C43033%2C1732390689303.1732390690021
2024-11-23T19:38:51,473 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-23T19:38:51,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-23T19:38:51,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-23T19:38:51,477 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-23T19:38:51,479 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-23T19:38:51,479 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-23T19:38:51,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-23T19:38:51,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-23T19:38:51,634 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43033 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-23T19:38:51,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88.
2024-11-23T19:38:51,634 INFO [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 9c6e5b318bdd60aa7170518dd086ff88 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-23T19:38:51,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/.tmp/info/6e341c38dc974223b546cec6e0199ded is 1080, key is row0000/info:/1732390731453/Put/seqid=0
2024-11-23T19:38:51,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741843_1019 (size=6033)
2024-11-23T19:38:51,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741843_1019 (size=6033)
2024-11-23T19:38:51,647 INFO [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/.tmp/info/6e341c38dc974223b546cec6e0199ded
2024-11-23T19:38:51,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/.tmp/info/6e341c38dc974223b546cec6e0199ded as hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/6e341c38dc974223b546cec6e0199ded
2024-11-23T19:38:51,659 INFO [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/6e341c38dc974223b546cec6e0199ded, entries=1, sequenceid=18, filesize=5.9 K
2024-11-23T19:38:51,660 INFO [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 9c6e5b318bdd60aa7170518dd086ff88 in 26ms, sequenceid=18, compaction requested=false
2024-11-23T19:38:51,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 9c6e5b318bdd60aa7170518dd086ff88:
2024-11-23T19:38:51,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88.
2024-11-23T19:38:51,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-23T19:38:51,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-23T19:38:51,667 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-23T19:38:51,667 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 185 msec 2024-11-23T19:38:51,670 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 195 msec 2024-11-23T19:38:52,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:52,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:53,592 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:53,592 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:54,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:54,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:55,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:38:55,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:56,005 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 9c6e5b318bdd60aa7170518dd086ff88, had cached 0 bytes from a total of 14329 2024-11-23T19:38:56,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:56,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:57,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:57,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:58,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:58,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:59,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:38:59,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:00,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:00,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-23T19:39:01,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40023 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-23T19:39:01,549 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-23T19:39:01,551 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C43033%2C1732390689303.1732390741551
2024-11-23T19:39:01,559 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T19:39:01,559 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T19:39:01,559 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T19:39:01,559 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T19:39:01,559 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-23T19:39:01,559 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/WALs/387b213c044a,43033,1732390689303/387b213c044a%2C43033%2C1732390689303.1732390731454 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/WALs/387b213c044a,43033,1732390689303/387b213c044a%2C43033%2C1732390689303.1732390741551
2024-11-23T19:39:01,560 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40895:40895),(127.0.0.1/127.0.0.1:44299:44299)]
2024-11-23T19:39:01,561 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/WALs/387b213c044a,43033,1732390689303/387b213c044a%2C43033%2C1732390689303.1732390731454 is not closed yet, will try archiving it next time
2024-11-23T19:39:01,561 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/WALs/387b213c044a,43033,1732390689303/387b213c044a%2C43033%2C1732390689303.1732390720912 to hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/oldWALs/387b213c044a%2C43033%2C1732390689303.1732390720912
2024-11-23T19:39:01,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-23T19:39:01,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741842_1018 (size=2026)
2024-11-23T19:39:01,561 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-23T19:39:01,561 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-23T19:39:01,561 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-23T19:39:01,561 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-23T19:39:01,561 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-23T19:39:01,561 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-23T19:39:01,561 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=374123830, stopped=false
2024-11-23T19:39:01,561 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=387b213c044a,40023,1732390688975
2024-11-23T19:39:01,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741842_1018 (size=2026)
2024-11-23T19:39:01,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:01,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:39:01,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T19:39:01,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T19:39:01,727 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T19:39:01,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:01,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:01,727 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-23T19:39:01,728 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at 
org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:39:01,728 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:39:01,728 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '387b213c044a,43033,1732390689303' ***** 2024-11-23T19:39:01,728 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T19:39:01,728 INFO [RS:0;387b213c044a:43033 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T19:39:01,728 INFO [RS:0;387b213c044a:43033 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T19:39:01,728 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T19:39:01,728 INFO [RS:0;387b213c044a:43033 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T19:39:01,728 INFO [RS:0;387b213c044a:43033 {}] regionserver.HRegionServer(3091): Received CLOSE for 9c6e5b318bdd60aa7170518dd086ff88 2024-11-23T19:39:01,728 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:39:01,729 INFO [RS:0;387b213c044a:43033 {}] regionserver.HRegionServer(959): stopping server 387b213c044a,43033,1732390689303 2024-11-23T19:39:01,729 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:39:01,729 INFO [RS:0;387b213c044a:43033 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T19:39:01,729 INFO [RS:0;387b213c044a:43033 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;387b213c044a:43033. 
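[Editor's note] The repeated RecoverLeaseFSUtils warnings above ("Failed invocation ... Caused by: java.io.IOException: Filesystem closed") come from the WAL close path probing DistributedFileSystem.isFileClosed reflectively after the mini-cluster's DFS client has already been shut down. A minimal, hypothetical sketch of that kind of reflective probe (illustrative only, not the HBase source; the class name is an assumption) could look like:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: call isFileClosed(Path) reflectively so the caller needs no
// hard compile-time dependency on DistributedFileSystem, and treat an
// InvocationTargetException (e.g. "Filesystem closed" during shutdown) as "unknown".
final class IsFileClosedProbe {
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // this filesystem implementation exposes no such probe
    } catch (IllegalAccessException | InvocationTargetException e) {
      // during teardown the cause is typically IOException("Filesystem closed"),
      // which is exactly what the WARN records above report
      return false;
    }
  }
}

In this run the warnings appear only while the mini-cluster is being shut down and the underlying filesystem is already closed, so they are noisy but not indicative of a test failure.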
2024-11-23T19:39:01,729 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9c6e5b318bdd60aa7170518dd086ff88, disabling compactions & flushes 2024-11-23T19:39:01,729 DEBUG [RS:0;387b213c044a:43033 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:39:01,729 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88. 2024-11-23T19:39:01,729 DEBUG [RS:0;387b213c044a:43033 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:39:01,729 INFO [RS:0;387b213c044a:43033 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T19:39:01,729 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88. 2024-11-23T19:39:01,729 INFO [RS:0;387b213c044a:43033 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T19:39:01,729 INFO [RS:0;387b213c044a:43033 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T19:39:01,729 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88. after waiting 0 ms 2024-11-23T19:39:01,729 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88. 
2024-11-23T19:39:01,729 INFO [RS:0;387b213c044a:43033 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-23T19:39:01,729 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 9c6e5b318bdd60aa7170518dd086ff88 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-23T19:39:01,729 INFO [RS:0;387b213c044a:43033 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-23T19:39:01,729 DEBUG [RS:0;387b213c044a:43033 {}] regionserver.HRegionServer(1325): Online Regions={9c6e5b318bdd60aa7170518dd086ff88=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88., 1588230740=hbase:meta,,1.1588230740} 2024-11-23T19:39:01,729 DEBUG [RS:0;387b213c044a:43033 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 9c6e5b318bdd60aa7170518dd086ff88 2024-11-23T19:39:01,729 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T19:39:01,730 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T19:39:01,730 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T19:39:01,730 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T19:39:01,730 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T19:39:01,730 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-23T19:39:01,734 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/.tmp/info/44ef45052b254363bff9c6ad1fb4733b is 1080, key is row0001/info:/1732390741550/Put/seqid=0 2024-11-23T19:39:01,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741845_1021 (size=6033) 2024-11-23T19:39:01,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741845_1021 (size=6033) 2024-11-23T19:39:01,739 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/.tmp/info/44ef45052b254363bff9c6ad1fb4733b 2024-11-23T19:39:01,746 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/.tmp/info/44ef45052b254363bff9c6ad1fb4733b as hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/44ef45052b254363bff9c6ad1fb4733b 2024-11-23T19:39:01,752 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/44ef45052b254363bff9c6ad1fb4733b, entries=1, sequenceid=22, filesize=5.9 K 2024-11-23T19:39:01,752 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740/.tmp/info/85079b5e495a408bb60f32bf9730adf1 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88./info:regioninfo/1732390691023/Put/seqid=0 2024-11-23T19:39:01,753 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 9c6e5b318bdd60aa7170518dd086ff88 in 24ms, sequenceid=22, compaction requested=true 2024-11-23T19:39:01,754 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/47511e94e3ac413a858b4f9d66f4ac9c, hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/1bef32acdc874f3ea58b3335ee2fc8ce, hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/17982f3c3bf14efc90dfb4cebbb6731a] to archive 2024-11-23T19:39:01,755 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
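[Editor's note] The "Committing .../.tmp/info/... as .../info/..." records above show the flush commit pattern: the new HFile is written under the region's .tmp directory and then renamed into the column-family directory so readers never observe a partially written file. A hedged, self-contained sketch of that two-step commit on HDFS (hypothetical helper, not HBase's HRegionFileSystem) might be:

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: write-to-.tmp-then-rename commit, relying on an HDFS rename
// within one filesystem being atomic from the reader's point of view.
final class TmpCommit {
  static Path commit(FileSystem fs, Path tmpHFile, Path familyDir) throws IOException {
    Path dest = new Path(familyDir, tmpHFile.getName());
    if (!fs.rename(tmpHFile, dest)) {
      throw new IOException("Failed to commit " + tmpHFile + " as " + dest);
    }
    return dest; // e.g. .../info/44ef45052b254363bff9c6ad1fb4733b in the log above
  }
}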
2024-11-23T19:39:01,757 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/47511e94e3ac413a858b4f9d66f4ac9c to hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/47511e94e3ac413a858b4f9d66f4ac9c 2024-11-23T19:39:01,758 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/1bef32acdc874f3ea58b3335ee2fc8ce to hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/1bef32acdc874f3ea58b3335ee2fc8ce 2024-11-23T19:39:01,760 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/17982f3c3bf14efc90dfb4cebbb6731a to hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/info/17982f3c3bf14efc90dfb4cebbb6731a 2024-11-23T19:39:01,761 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=387b213c044a:40023 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-23T19:39:01,761 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [47511e94e3ac413a858b4f9d66f4ac9c=6033, 1bef32acdc874f3ea58b3335ee2fc8ce=6033, 17982f3c3bf14efc90dfb4cebbb6731a=6033] 2024-11-23T19:39:01,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741846_1022 (size=7308) 2024-11-23T19:39:01,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741846_1022 (size=7308) 2024-11-23T19:39:01,764 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740/.tmp/info/85079b5e495a408bb60f32bf9730adf1 2024-11-23T19:39:01,766 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/9c6e5b318bdd60aa7170518dd086ff88/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-23T19:39:01,767 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88. 2024-11-23T19:39:01,767 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9c6e5b318bdd60aa7170518dd086ff88: Waiting for close lock at 1732390741729Running coprocessor pre-close hooks at 1732390741729Disabling compacts and flushes for region at 1732390741729Disabling writes for close at 1732390741729Obtaining lock to block concurrent updates at 1732390741729Preparing flush snapshotting stores in 9c6e5b318bdd60aa7170518dd086ff88 at 1732390741729Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732390741729Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88. 
at 1732390741730 (+1 ms)Flushing 9c6e5b318bdd60aa7170518dd086ff88/info: creating writer at 1732390741730Flushing 9c6e5b318bdd60aa7170518dd086ff88/info: appending metadata at 1732390741733 (+3 ms)Flushing 9c6e5b318bdd60aa7170518dd086ff88/info: closing flushed file at 1732390741733Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@417f27: reopening flushed file at 1732390741745 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 9c6e5b318bdd60aa7170518dd086ff88 in 24ms, sequenceid=22, compaction requested=true at 1732390741753 (+8 ms)Writing region close event to WAL at 1732390741762 (+9 ms)Running coprocessor post-close hooks at 1732390741766 (+4 ms)Closed at 1732390741767 (+1 ms) 2024-11-23T19:39:01,767 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732390690651.9c6e5b318bdd60aa7170518dd086ff88. 2024-11-23T19:39:01,785 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740/.tmp/ns/b2294c874b734e5786d9af3848358882 is 43, key is default/ns:d/1732390690527/Put/seqid=0 2024-11-23T19:39:01,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741847_1023 (size=5153) 2024-11-23T19:39:01,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741847_1023 (size=5153) 2024-11-23T19:39:01,790 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740/.tmp/ns/b2294c874b734e5786d9af3848358882 2024-11-23T19:39:01,809 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740/.tmp/table/2c53be5d3e0643bba202afbd068e68cd is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732390691033/Put/seqid=0 2024-11-23T19:39:01,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741848_1024 (size=5508) 2024-11-23T19:39:01,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741848_1024 (size=5508) 2024-11-23T19:39:01,814 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740/.tmp/table/2c53be5d3e0643bba202afbd068e68cd 2024-11-23T19:39:01,821 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740/.tmp/info/85079b5e495a408bb60f32bf9730adf1 as 
hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740/info/85079b5e495a408bb60f32bf9730adf1 2024-11-23T19:39:01,825 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740/info/85079b5e495a408bb60f32bf9730adf1, entries=10, sequenceid=11, filesize=7.1 K 2024-11-23T19:39:01,826 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740/.tmp/ns/b2294c874b734e5786d9af3848358882 as hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740/ns/b2294c874b734e5786d9af3848358882 2024-11-23T19:39:01,831 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740/ns/b2294c874b734e5786d9af3848358882, entries=2, sequenceid=11, filesize=5.0 K 2024-11-23T19:39:01,832 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740/.tmp/table/2c53be5d3e0643bba202afbd068e68cd as hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740/table/2c53be5d3e0643bba202afbd068e68cd 2024-11-23T19:39:01,839 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740/table/2c53be5d3e0643bba202afbd068e68cd, entries=2, sequenceid=11, filesize=5.4 K 2024-11-23T19:39:01,840 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 110ms, sequenceid=11, compaction requested=false 2024-11-23T19:39:01,845 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-23T19:39:01,846 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T19:39:01,846 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T19:39:01,846 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732390741729Running coprocessor pre-close hooks at 1732390741729Disabling compacts and flushes for region at 1732390741729Disabling writes for close at 1732390741730 (+1 ms)Obtaining lock to block concurrent updates at 1732390741730Preparing flush snapshotting stores in 1588230740 at 
1732390741730Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732390741730Flushing stores of hbase:meta,,1.1588230740 at 1732390741731 (+1 ms)Flushing 1588230740/info: creating writer at 1732390741731Flushing 1588230740/info: appending metadata at 1732390741752 (+21 ms)Flushing 1588230740/info: closing flushed file at 1732390741752Flushing 1588230740/ns: creating writer at 1732390741770 (+18 ms)Flushing 1588230740/ns: appending metadata at 1732390741784 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732390741785 (+1 ms)Flushing 1588230740/table: creating writer at 1732390741795 (+10 ms)Flushing 1588230740/table: appending metadata at 1732390741808 (+13 ms)Flushing 1588230740/table: closing flushed file at 1732390741808Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@331b5220: reopening flushed file at 1732390741820 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@b762f64: reopening flushed file at 1732390741825 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c308b17: reopening flushed file at 1732390741831 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 110ms, sequenceid=11, compaction requested=false at 1732390741840 (+9 ms)Writing region close event to WAL at 1732390741842 (+2 ms)Running coprocessor post-close hooks at 1732390741846 (+4 ms)Closed at 1732390741846 2024-11-23T19:39:01,846 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T19:39:01,890 INFO [regionserver/387b213c044a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T19:39:01,897 INFO [regionserver/387b213c044a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-23T19:39:01,897 INFO [regionserver/387b213c044a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-23T19:39:01,929 INFO [RS:0;387b213c044a:43033 {}] regionserver.HRegionServer(976): stopping server 387b213c044a,43033,1732390689303; all regions closed. 
2024-11-23T19:39:01,932 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:01,932 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:01,932 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:01,932 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:01,933 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:01,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741834_1010 (size=3306) 2024-11-23T19:39:01,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741834_1010 (size=3306) 2024-11-23T19:39:01,936 DEBUG [RS:0;387b213c044a:43033 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/oldWALs 2024-11-23T19:39:01,936 INFO [RS:0;387b213c044a:43033 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 387b213c044a%2C43033%2C1732390689303.meta:.meta(num 1732390690447) 2024-11-23T19:39:01,937 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:01,937 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:01,937 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:01,937 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:01,937 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:01,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741844_1020 (size=1252) 2024-11-23T19:39:01,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741844_1020 (size=1252) 2024-11-23T19:39:01,942 DEBUG [RS:0;387b213c044a:43033 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/oldWALs 2024-11-23T19:39:01,942 INFO [RS:0;387b213c044a:43033 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 387b213c044a%2C43033%2C1732390689303:(num 1732390741551) 2024-11-23T19:39:01,942 DEBUG [RS:0;387b213c044a:43033 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:39:01,942 INFO [RS:0;387b213c044a:43033 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T19:39:01,942 INFO [RS:0;387b213c044a:43033 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T19:39:01,942 INFO [RS:0;387b213c044a:43033 {}] hbase.ChoreService(370): Chore service for: regionserver/387b213c044a:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T19:39:01,942 INFO [RS:0;387b213c044a:43033 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T19:39:01,942 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T19:39:01,942 INFO [RS:0;387b213c044a:43033 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43033 2024-11-23T19:39:01,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/387b213c044a,43033,1732390689303 2024-11-23T19:39:01,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T19:39:01,953 INFO [RS:0;387b213c044a:43033 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T19:39:01,954 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [387b213c044a,43033,1732390689303] 2024-11-23T19:39:01,974 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/387b213c044a,43033,1732390689303 already deleted, retry=false 2024-11-23T19:39:01,974 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 387b213c044a,43033,1732390689303 expired; onlineServers=0 2024-11-23T19:39:01,974 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '387b213c044a,40023,1732390688975' ***** 2024-11-23T19:39:01,974 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T19:39:01,974 INFO [M:0;387b213c044a:40023 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T19:39:01,974 INFO [M:0;387b213c044a:40023 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T19:39:01,975 DEBUG [M:0;387b213c044a:40023 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T19:39:01,975 DEBUG [M:0;387b213c044a:40023 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T19:39:01,975 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
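[Editor's note] All of the shutdown traffic in this section (rolling and archiving the WAL, closing regions, stopping the region server and then the master) is driven by the test's tearDown, as the AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster call stacks earlier in the log show. A minimal JUnit 4 teardown of that shape, sketched against the HBaseTestingUtil API seen in those stacks (class name, field name, and setup are assumptions), would be:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

// Illustrative only: start a mini HBase/HDFS cluster for a test and tear it down
// afterwards; shutdownMiniCluster() closes the shared async connection and stops
// the master and region server, producing the STOPPING/STOPPED records in this log.
public class MiniClusterTearDownSketch {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    testUtil.startMiniCluster(); // this run used one master and one region server
  }

  @After
  public void tearDown() throws Exception {
    testUtil.shutdownMiniCluster();
  }
}

The "Shutdown of 1 master(s) and 1 regionserver(s) complete" record later in the log is the end of exactly this call.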
2024-11-23T19:39:01,975 DEBUG [master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390689771 {}] cleaner.HFileCleaner(306): Exit Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390689771,5,FailOnTimeoutGroup] 2024-11-23T19:39:01,975 DEBUG [master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390689771 {}] cleaner.HFileCleaner(306): Exit Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390689771,5,FailOnTimeoutGroup] 2024-11-23T19:39:01,975 INFO [M:0;387b213c044a:40023 {}] hbase.ChoreService(370): Chore service for: master/387b213c044a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-23T19:39:01,975 INFO [M:0;387b213c044a:40023 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T19:39:01,975 DEBUG [M:0;387b213c044a:40023 {}] master.HMaster(1795): Stopping service threads 2024-11-23T19:39:01,975 INFO [M:0;387b213c044a:40023 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T19:39:01,975 INFO [M:0;387b213c044a:40023 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T19:39:01,975 INFO [M:0;387b213c044a:40023 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T19:39:01,975 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-23T19:39:01,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T19:39:01,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:01,985 DEBUG [M:0;387b213c044a:40023 {}] zookeeper.ZKUtil(347): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T19:39:01,985 WARN [M:0;387b213c044a:40023 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T19:39:01,986 INFO [M:0;387b213c044a:40023 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/.lastflushedseqids 2024-11-23T19:39:01,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741849_1025 (size=130) 2024-11-23T19:39:01,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741849_1025 (size=130) 2024-11-23T19:39:01,991 INFO [M:0;387b213c044a:40023 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-23T19:39:01,991 INFO [M:0;387b213c044a:40023 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T19:39:01,991 DEBUG [M:0;387b213c044a:40023 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T19:39:01,991 INFO [M:0;387b213c044a:40023 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:39:01,991 DEBUG [M:0;387b213c044a:40023 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:39:01,991 DEBUG [M:0;387b213c044a:40023 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T19:39:01,991 DEBUG [M:0;387b213c044a:40023 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:39:01,991 INFO [M:0;387b213c044a:40023 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.54 KB heapSize=54.89 KB 2024-11-23T19:39:02,007 DEBUG [M:0;387b213c044a:40023 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cd1d3143b05c481eabe0b6b4d89608a0 is 82, key is hbase:meta,,1/info:regioninfo/1732390690477/Put/seqid=0 2024-11-23T19:39:02,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741850_1026 (size=5672) 2024-11-23T19:39:02,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741850_1026 (size=5672) 2024-11-23T19:39:02,012 INFO [M:0;387b213c044a:40023 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cd1d3143b05c481eabe0b6b4d89608a0 2024-11-23T19:39:02,033 DEBUG [M:0;387b213c044a:40023 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8542f81018e540028ed2d78c7df80f14 is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732390691037/Put/seqid=0 2024-11-23T19:39:02,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741851_1027 (size=7817) 2024-11-23T19:39:02,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741851_1027 (size=7817) 2024-11-23T19:39:02,038 INFO [M:0;387b213c044a:40023 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.93 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8542f81018e540028ed2d78c7df80f14 2024-11-23T19:39:02,043 INFO [M:0;387b213c044a:40023 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8542f81018e540028ed2d78c7df80f14 2024-11-23T19:39:02,058 DEBUG [M:0;387b213c044a:40023 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8085c7d9470a4086835d22f415409f5d is 69, key is 387b213c044a,43033,1732390689303/rs:state/1732390689865/Put/seqid=0 
2024-11-23T19:39:02,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741852_1028 (size=5156) 2024-11-23T19:39:02,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741852_1028 (size=5156) 2024-11-23T19:39:02,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:39:02,064 INFO [RS:0;387b213c044a:43033 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T19:39:02,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43033-0x101693414690001, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:39:02,064 INFO [RS:0;387b213c044a:43033 {}] regionserver.HRegionServer(1031): Exiting; stopping=387b213c044a,43033,1732390689303; zookeeper connection closed. 2024-11-23T19:39:02,064 INFO [M:0;387b213c044a:40023 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8085c7d9470a4086835d22f415409f5d 2024-11-23T19:39:02,064 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@508a14b1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@508a14b1 2024-11-23T19:39:02,065 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-23T19:39:02,088 DEBUG [M:0;387b213c044a:40023 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9415efa4434146ffb9cd445c52cdd000 is 52, key is load_balancer_on/state:d/1732390690646/Put/seqid=0 2024-11-23T19:39:02,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741853_1029 (size=5056) 2024-11-23T19:39:02,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741853_1029 (size=5056) 2024-11-23T19:39:02,093 INFO [M:0;387b213c044a:40023 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9415efa4434146ffb9cd445c52cdd000 2024-11-23T19:39:02,099 DEBUG [M:0;387b213c044a:40023 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cd1d3143b05c481eabe0b6b4d89608a0 as hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cd1d3143b05c481eabe0b6b4d89608a0 2024-11-23T19:39:02,104 INFO [M:0;387b213c044a:40023 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cd1d3143b05c481eabe0b6b4d89608a0, entries=8, sequenceid=121, filesize=5.5 K 2024-11-23T19:39:02,105 DEBUG [M:0;387b213c044a:40023 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8542f81018e540028ed2d78c7df80f14 as hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8542f81018e540028ed2d78c7df80f14 2024-11-23T19:39:02,110 INFO [M:0;387b213c044a:40023 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8542f81018e540028ed2d78c7df80f14 2024-11-23T19:39:02,110 INFO [M:0;387b213c044a:40023 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8542f81018e540028ed2d78c7df80f14, entries=14, sequenceid=121, filesize=7.6 K 2024-11-23T19:39:02,111 DEBUG [M:0;387b213c044a:40023 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8085c7d9470a4086835d22f415409f5d as hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8085c7d9470a4086835d22f415409f5d 2024-11-23T19:39:02,117 INFO [M:0;387b213c044a:40023 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8085c7d9470a4086835d22f415409f5d, entries=1, sequenceid=121, filesize=5.0 K 2024-11-23T19:39:02,117 DEBUG [M:0;387b213c044a:40023 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9415efa4434146ffb9cd445c52cdd000 as hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9415efa4434146ffb9cd445c52cdd000 2024-11-23T19:39:02,122 INFO [M:0;387b213c044a:40023 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42187/user/jenkins/test-data/888b9641-7d57-8905-46fe-01af68b1c868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9415efa4434146ffb9cd445c52cdd000, entries=1, sequenceid=121, filesize=4.9 K 2024-11-23T19:39:02,123 INFO [M:0;387b213c044a:40023 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.54 KB/44581, heapSize ~54.83 KB/56144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=121, compaction requested=false 2024-11-23T19:39:02,125 INFO [M:0;387b213c044a:40023 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
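
At this point all four families (info, proc, rs, state) have had their flushed files moved out of .tmp and into the matching family directories under .../MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/. A minimal sketch, assuming only the standard Hadoop FileSystem API and the directory layout visible in the paths above, of how those committed HFiles could be listed; the class name and the region-directory argument are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: walk a region directory (e.g. the master:store region dir
// from the log) and print every store file the commit steps above produced.
public final class ListStoreFiles {
  public static void main(String[] args) throws Exception {
    // e.g. hdfs://localhost:42187/.../MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
    Path regionDir = new Path(args[0]);
    FileSystem fs = regionDir.getFileSystem(new Configuration());
    for (FileStatus family : fs.listStatus(regionDir)) {
      if (!family.isDirectory()) {
        continue;                                   // skip .regioninfo and other non-directory entries
      }
      for (FileStatus storeFile : fs.listStatus(family.getPath())) {
        System.out.println(storeFile.getPath() + " size=" + storeFile.getLen());
      }
    }
  }
}
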
2024-11-23T19:39:02,125 DEBUG [M:0;387b213c044a:40023 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732390741991Disabling compacts and flushes for region at 1732390741991Disabling writes for close at 1732390741991Obtaining lock to block concurrent updates at 1732390741991Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732390741991Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44581, getHeapSize=56144, getOffHeapSize=0, getCellsCount=140 at 1732390741991Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732390741992 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732390741992Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732390742007 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732390742007Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732390742018 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732390742032 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732390742033 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732390742044 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732390742058 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732390742058Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732390742070 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732390742088 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732390742088Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43f034df: reopening flushed file at 1732390742098 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8a5c2a2: reopening flushed file at 1732390742104 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42f4428b: reopening flushed file at 1732390742110 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2edf3624: reopening flushed file at 1732390742117 (+7 ms)Finished flush of dataSize ~43.54 KB/44581, heapSize ~54.83 KB/56144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 132ms, sequenceid=121, compaction requested=false at 1732390742123 (+6 ms)Writing region close event to WAL at 1732390742125 (+2 ms)Closed at 1732390742125 2024-11-23T19:39:02,125 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:02,125 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:02,125 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:02,125 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:02,125 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:02,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39749 is added to blk_1073741830_1006 (size=52978) 2024-11-23T19:39:02,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39783 is added to blk_1073741830_1006 (size=52978) 2024-11-23T19:39:02,128 INFO [M:0;387b213c044a:40023 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
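
The "Region close journal" above records every phase of the close (disabling flushes and compactions, taking the close lock, snapshotting the memstores, flushing and re-opening each store file, writing the close event to the WAL), each stamped with an absolute time in epoch milliseconds. A small illustrative check, using only java.time and two timestamps quoted from the journal, that those numbers reproduce the 132 ms flush duration the log itself reports; the class name is hypothetical:

import java.time.Duration;
import java.time.Instant;

// The journal's "at 1732390741991" / "at 1732390742123" values are epoch millis;
// their difference is the "Finished flush ... in 132ms" figure above.
public final class CloseJournalMath {
  public static void main(String[] args) {
    Instant lockObtained = Instant.ofEpochMilli(1732390741991L);
    Instant flushFinished = Instant.ofEpochMilli(1732390742123L);
    System.out.println(Duration.between(lockObtained, flushFinished).toMillis() + " ms"); // prints "132 ms"
  }
}
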
2024-11-23T19:39:02,128 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T19:39:02,128 INFO [M:0;387b213c044a:40023 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40023 2024-11-23T19:39:02,128 INFO [M:0;387b213c044a:40023 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T19:39:02,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:39:02,238 INFO [M:0;387b213c044a:40023 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T19:39:02,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40023-0x101693414690000, quorum=127.0.0.1:50342, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:39:02,278 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c6abbb8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:39:02,278 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@555a4a92{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:39:02,278 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:39:02,278 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f411ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:39:02,278 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ec76923{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/hadoop.log.dir/,STOPPED} 2024-11-23T19:39:02,280 WARN [BP-834335505-172.17.0.3-1732390686733 heartbeating to localhost/127.0.0.1:42187 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:39:02,280 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
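
The "Stopped ServerConnector@..." and "Stopped o.e.j.w.WebAppContext@..." entries above come from the embedded Jetty 9.4 servers that back the HDFS NameNode and DataNode web UIs being stopped along with the minicluster. A minimal sketch, assuming plain embedded Jetty rather than the Hadoop HTTP wrappers and using a hypothetical class name, of the start/stop lifecycle that produces such messages:

import org.eclipse.jetty.server.Server;

// Illustrative only: an embedded Jetty server bound to an ephemeral port
// (localhost:0, as in the log) logs "Stopped ServerConnector..." at INFO
// when it is stopped.
public final class JettyLifecycleSketch {
  public static void main(String[] args) throws Exception {
    Server server = new Server(0);
    server.start();
    // ... the minicluster would serve its web UI here ...
    server.stop();
    server.join();
  }
}
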
2024-11-23T19:39:02,280 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:39:02,280 WARN [BP-834335505-172.17.0.3-1732390686733 heartbeating to localhost/127.0.0.1:42187 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-834335505-172.17.0.3-1732390686733 (Datanode Uuid 347fc59d-5d84-4f5f-a89d-096d3191ace5) service to localhost/127.0.0.1:42187 2024-11-23T19:39:02,281 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/cluster_29ab16ff-714f-caed-9019-95fea71ec849/data/data3/current/BP-834335505-172.17.0.3-1732390686733 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:39:02,281 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/cluster_29ab16ff-714f-caed-9019-95fea71ec849/data/data4/current/BP-834335505-172.17.0.3-1732390686733 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:39:02,281 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:39:02,289 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5127cbf0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:39:02,290 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@9b25e94{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:39:02,290 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:39:02,290 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74b5ebca{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:39:02,290 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39f00f78{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/hadoop.log.dir/,STOPPED} 2024-11-23T19:39:02,292 WARN [BP-834335505-172.17.0.3-1732390686733 heartbeating to localhost/127.0.0.1:42187 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:39:02,292 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
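
Taken together, the entries above and just below (ending with "Minicluster is down") are the standard HBase minicluster teardown: the region server and master exit, the WAL roller and RPC server stop, both DataNodes end their block pool service, and the MiniZK cluster is shut down. A minimal sketch, assuming JUnit 4 and the HBaseTestingUtil class named in the log, of the start/stop pairing that drives this sequence from a test; the class name is hypothetical and no assertions are shown:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

// Illustrative only: startMiniCluster() brings up ZooKeeper, HDFS and HBase,
// and shutdownMiniCluster() produces the teardown sequence recorded above,
// finishing with the "Minicluster is down" message.
public class MiniClusterLifecycleSketch {
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDown() throws Exception {
    UTIL.shutdownMiniCluster();
  }
}
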
2024-11-23T19:39:02,292 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:39:02,292 WARN [BP-834335505-172.17.0.3-1732390686733 heartbeating to localhost/127.0.0.1:42187 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-834335505-172.17.0.3-1732390686733 (Datanode Uuid b7ef4820-dc05-4b96-9952-ac870b07d58f) service to localhost/127.0.0.1:42187 2024-11-23T19:39:02,292 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/cluster_29ab16ff-714f-caed-9019-95fea71ec849/data/data1/current/BP-834335505-172.17.0.3-1732390686733 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:39:02,293 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/cluster_29ab16ff-714f-caed-9019-95fea71ec849/data/data2/current/BP-834335505-172.17.0.3-1732390686733 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:39:02,293 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:39:02,299 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2719a663{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T19:39:02,299 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1af676f5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:39:02,299 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:39:02,299 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6dc3ea71{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:39:02,300 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a4b134d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/hadoop.log.dir/,STOPPED} 2024-11-23T19:39:02,309 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-23T19:39:02,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-23T19:39:02,347 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 179) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42187 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42187 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42187 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42187 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42187 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42187 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42187 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:42187 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42187 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=247 (was 294), ProcessCount=11 (was 11), AvailableMemoryMB=2708 (was 2726) 2024-11-23T19:39:02,360 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=247, ProcessCount=11, AvailableMemoryMB=2708 2024-11-23T19:39:02,360 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-23T19:39:02,360 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/hadoop.log.dir so I do NOT create it in target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2 2024-11-23T19:39:02,360 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee4d0cb0-4d2f-d517-34c2-c52b98f4d7fc/hadoop.tmp.dir so I do NOT create it in target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2 2024-11-23T19:39:02,360 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/cluster_eff18020-c07d-7b25-d7ec-35b1cdebb6d0, deleteOnExit=true 2024-11-23T19:39:02,360 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-23T19:39:02,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/test.cache.data in system properties and HBase conf 2024-11-23T19:39:02,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T19:39:02,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/hadoop.log.dir in system properties and HBase conf 2024-11-23T19:39:02,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T19:39:02,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/mapreduce.cluster.temp.dir in 
system properties and HBase conf 2024-11-23T19:39:02,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-23T19:39:02,361 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-23T19:39:02,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T19:39:02,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T19:39:02,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T19:39:02,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T19:39:02,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T19:39:02,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T19:39:02,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T19:39:02,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T19:39:02,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T19:39:02,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/nfs.dump.dir in system properties and HBase conf 2024-11-23T19:39:02,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/java.io.tmpdir in system properties and HBase conf 2024-11-23T19:39:02,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T19:39:02,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T19:39:02,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T19:39:02,377 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T19:39:02,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:02,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:02,698 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:39:02,702 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:39:02,703 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:39:02,703 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:39:02,703 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T19:39:02,704 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:39:02,705 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24350cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:39:02,705 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f8818bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:39:02,807 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7cf57c4{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/java.io.tmpdir/jetty-localhost-39933-hadoop-hdfs-3_4_1-tests_jar-_-any-4781027249541941746/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T19:39:02,808 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a67ff9c{HTTP/1.1, (http/1.1)}{localhost:39933} 2024-11-23T19:39:02,808 INFO [Time-limited test {}] server.Server(415): Started @255339ms 2024-11-23T19:39:02,824 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T19:39:03,101 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:39:03,105 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:39:03,107 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:39:03,107 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:39:03,108 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T19:39:03,108 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2001df3c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:39:03,108 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f53b1c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:39:03,208 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@70d55230{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/java.io.tmpdir/jetty-localhost-46017-hadoop-hdfs-3_4_1-tests_jar-_-any-10344964153440774116/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:39:03,208 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7986f193{HTTP/1.1, (http/1.1)}{localhost:46017} 2024-11-23T19:39:03,208 INFO [Time-limited test {}] server.Server(415): Started @255739ms 2024-11-23T19:39:03,210 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:39:03,250 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:39:03,254 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:39:03,256 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:39:03,256 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:39:03,257 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T19:39:03,257 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@692ba77d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:39:03,258 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25f949b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:39:03,372 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@9caeb33{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/java.io.tmpdir/jetty-localhost-45365-hadoop-hdfs-3_4_1-tests_jar-_-any-2990293324060931341/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:39:03,372 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1284b092{HTTP/1.1, (http/1.1)}{localhost:45365} 2024-11-23T19:39:03,372 INFO [Time-limited test {}] server.Server(415): Started @255903ms 2024-11-23T19:39:03,374 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:39:03,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:03,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:39:04,255 WARN [Thread-1979 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/cluster_eff18020-c07d-7b25-d7ec-35b1cdebb6d0/data/data2/current/BP-639699025-172.17.0.3-1732390742380/current, will proceed with Du for space computation calculation, 2024-11-23T19:39:04,255 WARN [Thread-1978 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/cluster_eff18020-c07d-7b25-d7ec-35b1cdebb6d0/data/data1/current/BP-639699025-172.17.0.3-1732390742380/current, will proceed with Du for space computation calculation, 2024-11-23T19:39:04,276 WARN [Thread-1942 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T19:39:04,279 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x53b6f5487e7141c6 with lease ID 0x638d05bc442ebad6: Processing first storage report for DS-947b753e-4626-4af1-9ffb-474b271c4970 from datanode DatanodeRegistration(127.0.0.1:42507, datanodeUuid=610b4960-f9b6-4ac6-95cd-a7145fad1416, infoPort=41403, infoSecurePort=0, ipcPort=43315, storageInfo=lv=-57;cid=testClusterID;nsid=743530375;c=1732390742380) 2024-11-23T19:39:04,279 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x53b6f5487e7141c6 with lease ID 0x638d05bc442ebad6: from storage DS-947b753e-4626-4af1-9ffb-474b271c4970 node DatanodeRegistration(127.0.0.1:42507, datanodeUuid=610b4960-f9b6-4ac6-95cd-a7145fad1416, infoPort=41403, infoSecurePort=0, ipcPort=43315, storageInfo=lv=-57;cid=testClusterID;nsid=743530375;c=1732390742380), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-23T19:39:04,279 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x53b6f5487e7141c6 with lease ID 0x638d05bc442ebad6: Processing first storage report for DS-f9096b9e-10b6-4e27-b863-44c5543b1c69 from datanode DatanodeRegistration(127.0.0.1:42507, datanodeUuid=610b4960-f9b6-4ac6-95cd-a7145fad1416, infoPort=41403, infoSecurePort=0, ipcPort=43315, storageInfo=lv=-57;cid=testClusterID;nsid=743530375;c=1732390742380) 2024-11-23T19:39:04,279 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x53b6f5487e7141c6 with lease ID 0x638d05bc442ebad6: from storage DS-f9096b9e-10b6-4e27-b863-44c5543b1c69 node DatanodeRegistration(127.0.0.1:42507, datanodeUuid=610b4960-f9b6-4ac6-95cd-a7145fad1416, infoPort=41403, infoSecurePort=0, ipcPort=43315, storageInfo=lv=-57;cid=testClusterID;nsid=743530375;c=1732390742380), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:39:04,416 WARN [Thread-1989 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/cluster_eff18020-c07d-7b25-d7ec-35b1cdebb6d0/data/data3/current/BP-639699025-172.17.0.3-1732390742380/current, will proceed with Du for space computation calculation, 2024-11-23T19:39:04,416 WARN [Thread-1990 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/cluster_eff18020-c07d-7b25-d7ec-35b1cdebb6d0/data/data4/current/BP-639699025-172.17.0.3-1732390742380/current, will proceed with Du for space computation calculation, 2024-11-23T19:39:04,437 WARN [Thread-1965 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T19:39:04,438 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x93ca1301d30ce8ee with lease ID 0x638d05bc442ebad7: Processing first storage report for DS-882dfdf4-a535-426a-b1a0-1e95f89a4ff8 from datanode DatanodeRegistration(127.0.0.1:46697, datanodeUuid=e9989444-cdec-419d-a323-ca0ed17f10ce, infoPort=43601, infoSecurePort=0, ipcPort=44457, storageInfo=lv=-57;cid=testClusterID;nsid=743530375;c=1732390742380) 2024-11-23T19:39:04,438 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x93ca1301d30ce8ee with lease ID 0x638d05bc442ebad7: from storage DS-882dfdf4-a535-426a-b1a0-1e95f89a4ff8 node DatanodeRegistration(127.0.0.1:46697, datanodeUuid=e9989444-cdec-419d-a323-ca0ed17f10ce, infoPort=43601, infoSecurePort=0, ipcPort=44457, storageInfo=lv=-57;cid=testClusterID;nsid=743530375;c=1732390742380), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:39:04,439 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x93ca1301d30ce8ee with lease ID 0x638d05bc442ebad7: Processing first storage report for DS-dc297fc4-7a0a-4633-90ef-7556913f1b9d from datanode DatanodeRegistration(127.0.0.1:46697, datanodeUuid=e9989444-cdec-419d-a323-ca0ed17f10ce, infoPort=43601, infoSecurePort=0, ipcPort=44457, storageInfo=lv=-57;cid=testClusterID;nsid=743530375;c=1732390742380) 2024-11-23T19:39:04,439 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x93ca1301d30ce8ee with lease ID 0x638d05bc442ebad7: from storage DS-dc297fc4-7a0a-4633-90ef-7556913f1b9d node DatanodeRegistration(127.0.0.1:46697, datanodeUuid=e9989444-cdec-419d-a323-ca0ed17f10ce, infoPort=43601, infoSecurePort=0, ipcPort=44457, storageInfo=lv=-57;cid=testClusterID;nsid=743530375;c=1732390742380), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:39:04,516 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2 2024-11-23T19:39:04,525 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/cluster_eff18020-c07d-7b25-d7ec-35b1cdebb6d0/zookeeper_0, clientPort=63840, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/cluster_eff18020-c07d-7b25-d7ec-35b1cdebb6d0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/cluster_eff18020-c07d-7b25-d7ec-35b1cdebb6d0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T19:39:04,526 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63840 2024-11-23T19:39:04,527 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:39:04,528 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:39:04,535 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T19:39:04,535 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T19:39:04,535 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-23T19:39:04,535 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-23T19:39:04,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741825_1001 (size=7) 2024-11-23T19:39:04,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741825_1001 (size=7) 2024-11-23T19:39:04,551 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98 with version=8 2024-11-23T19:39:04,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/hbase-staging 2024-11-23T19:39:04,554 INFO [Time-limited test {}] client.ConnectionUtils(128): master/387b213c044a:0 server-side Connection retries=45 2024-11-23T19:39:04,554 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:39:04,554 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T19:39:04,554 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T19:39:04,554 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:39:04,554 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T19:39:04,554 INFO [Time-limited test {}] 
ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-23T19:39:04,554 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T19:39:04,555 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42085 2024-11-23T19:39:04,556 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42085 connecting to ZooKeeper ensemble=127.0.0.1:63840 2024-11-23T19:39:04,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:04,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:04,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:420850x0, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T19:39:04,638 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42085-0x1016934ed750000 connected 2024-11-23T19:39:04,730 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:39:04,732 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:39:04,734 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:39:04,734 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98, hbase.cluster.distributed=false 2024-11-23T19:39:04,736 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T19:39:04,740 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42085 2024-11-23T19:39:04,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42085 2024-11-23T19:39:04,748 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42085 2024-11-23T19:39:04,751 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42085 2024-11-23T19:39:04,752 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42085 2024-11-23T19:39:04,769 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/387b213c044a:0 server-side Connection retries=45 2024-11-23T19:39:04,769 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:39:04,769 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T19:39:04,769 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T19:39:04,769 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:39:04,769 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T19:39:04,769 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T19:39:04,769 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T19:39:04,770 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41609 2024-11-23T19:39:04,772 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41609 connecting to ZooKeeper ensemble=127.0.0.1:63840 2024-11-23T19:39:04,772 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:39:04,773 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:39:04,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:416090x0, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T19:39:04,783 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:416090x0, quorum=127.0.0.1:63840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:39:04,784 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41609-0x1016934ed750001 connected 2024-11-23T19:39:04,784 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T19:39:04,788 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with 
cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T19:39:04,789 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T19:39:04,790 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T19:39:04,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41609 2024-11-23T19:39:04,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41609 2024-11-23T19:39:04,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41609 2024-11-23T19:39:04,796 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41609 2024-11-23T19:39:04,797 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41609 2024-11-23T19:39:04,811 DEBUG [M:0;387b213c044a:42085 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;387b213c044a:42085 2024-11-23T19:39:04,812 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/387b213c044a,42085,1732390744553 2024-11-23T19:39:04,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:39:04,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:39:04,828 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/387b213c044a,42085,1732390744553 2024-11-23T19:39:04,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T19:39:04,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:04,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:04,836 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T19:39:04,839 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for 
/hbase/backup-masters/387b213c044a,42085,1732390744553 from backup master directory 2024-11-23T19:39:04,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/387b213c044a,42085,1732390744553 2024-11-23T19:39:04,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:39:04,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:39:04,846 WARN [master/387b213c044a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T19:39:04,846 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=387b213c044a,42085,1732390744553 2024-11-23T19:39:04,855 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/hbase.id] with ID: f99583c9-4d00-44b6-8308-d4248d548751 2024-11-23T19:39:04,856 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/.tmp/hbase.id 2024-11-23T19:39:04,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741826_1002 (size=42) 2024-11-23T19:39:04,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741826_1002 (size=42) 2024-11-23T19:39:04,867 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/.tmp/hbase.id]:[hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/hbase.id] 2024-11-23T19:39:04,882 INFO [master/387b213c044a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:39:04,882 INFO [master/387b213c044a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-23T19:39:04,884 INFO [master/387b213c044a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
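The entries above show the active master creating the cluster ID file by writing it to a temporary location (`.tmp/hbase.id`) and then moving it to its final path. Below is a minimal sketch of that write-then-rename pattern using only the standard Hadoop `FileSystem` API; the helper name and the root-dir path are illustrative, not HBase's internal `FSUtils` code.

```java
// Illustrative sketch (not HBase's FSUtils itself) of the pattern logged above:
// write the cluster ID to a temporary file, then rename it into place.
// The URI, paths, and method name are hypothetical; only the Hadoop
// FileSystem calls (getFileSystem/create/rename) are standard API.
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ClusterIdFileSketch {

  /** Writes clusterId under rootDir/.tmp/hbase.id, then renames it to rootDir/hbase.id. */
  static void writeClusterId(Configuration conf, Path rootDir, String clusterId) throws IOException {
    FileSystem fs = rootDir.getFileSystem(conf);
    Path tmp = new Path(new Path(rootDir, ".tmp"), "hbase.id");
    Path target = new Path(rootDir, "hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {   // overwrite any stale temp file
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, target)) {                          // the "move to target location" step in the log
      throw new IOException("Failed to move " + tmp + " to " + target);
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();               // would point at the mini-cluster namenode in a test
    writeClusterId(conf,
        new Path("hdfs://localhost:41669/user/jenkins/test-data/example"),  // illustrative root dir
        "f99583c9-4d00-44b6-8308-d4248d548751");            // the ID printed in the log above
  }
}
```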
2024-11-23T19:39:04,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:04,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:04,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741827_1003 (size=196) 2024-11-23T19:39:04,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741827_1003 (size=196) 2024-11-23T19:39:04,906 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T19:39:04,907 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-23T19:39:04,907 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:39:04,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741828_1004 (size=1189) 2024-11-23T19:39:04,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741828_1004 (size=1189) 2024-11-23T19:39:04,923 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store 2024-11-23T19:39:04,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741829_1005 (size=34) 2024-11-23T19:39:04,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741829_1005 (size=34) 2024-11-23T19:39:04,930 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:39:04,931 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T19:39:04,931 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:39:04,931 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:39:04,931 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T19:39:04,931 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:39:04,931 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
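The `master:store` descriptor dumped above lists its column families with their versions, block encoding, bloom filter, in-memory flag, and block size. As a rough illustration of where those attributes come from, the sketch below assembles a descriptor with the same family settings through the public HBase client builder API; it is not the MasterRegion code path itself, and the table name (`demo:store`) is hypothetical to avoid touching a system namespace.

```java
// Illustrative sketch only: building a table descriptor with the 'info' and
// 'proc' family settings printed in the log above, via the public builder API.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class StoreDescriptorSketch {

  static TableDescriptor build() {
    // 'info' family: 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();
    // 'proc' family: 1 version, no encoding, ROW bloom, 64 KB blocks ('rs' and 'state' look the same)
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(64 * 1024)
        .build();
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store"))  // hypothetical name
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }

  public static void main(String[] args) {
    System.out.println(build());  // prints an attribute dump similar to the log line above
  }
}
```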
2024-11-23T19:39:04,931 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732390744930Disabling compacts and flushes for region at 1732390744930Disabling writes for close at 1732390744931 (+1 ms)Writing region close event to WAL at 1732390744931Closed at 1732390744931 2024-11-23T19:39:04,932 WARN [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/.initializing 2024-11-23T19:39:04,932 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/WALs/387b213c044a,42085,1732390744553 2024-11-23T19:39:04,934 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C42085%2C1732390744553, suffix=, logDir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/WALs/387b213c044a,42085,1732390744553, archiveDir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/oldWALs, maxLogs=10 2024-11-23T19:39:04,935 INFO [master/387b213c044a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C42085%2C1732390744553.1732390744935 2024-11-23T19:39:04,946 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/WALs/387b213c044a,42085,1732390744553/387b213c044a%2C42085%2C1732390744553.1732390744935 2024-11-23T19:39:04,949 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43601:43601),(127.0.0.1/127.0.0.1:41403:41403)] 2024-11-23T19:39:04,954 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T19:39:04,954 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:39:04,954 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:04,954 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:04,956 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:04,957 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T19:39:04,957 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:04,958 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:39:04,958 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:04,959 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T19:39:04,959 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:04,959 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:39:04,959 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:04,961 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T19:39:04,961 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:04,962 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:39:04,962 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:04,963 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T19:39:04,963 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:04,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:39:04,964 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:04,965 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:04,966 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:04,967 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:04,967 DEBUG [master/387b213c044a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:04,967 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T19:39:04,968 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:04,970 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T19:39:04,970 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=880750, jitterRate=0.11993275582790375}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T19:39:04,971 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732390744954Initializing all the Stores at 1732390744955 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390744955Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390744956 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390744956Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390744956Cleaning up temporary data from old regions at 1732390744967 (+11 ms)Region opened successfully at 1732390744971 (+4 ms) 2024-11-23T19:39:04,971 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T19:39:04,973 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31ac4f2f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=387b213c044a/172.17.0.3:0 2024-11-23T19:39:04,974 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-23T19:39:04,974 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T19:39:04,974 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T19:39:04,974 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T19:39:04,975 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-23T19:39:04,975 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-23T19:39:04,975 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T19:39:04,977 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T19:39:04,978 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T19:39:04,989 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-23T19:39:04,989 INFO [master/387b213c044a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T19:39:04,990 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T19:39:04,999 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-23T19:39:05,000 INFO [master/387b213c044a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T19:39:05,001 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T19:39:05,010 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-23T19:39:05,011 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T19:39:05,021 DEBUG 
[master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T19:39:05,023 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T19:39:05,035 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T19:39:05,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T19:39:05,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:05,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T19:39:05,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:05,046 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=387b213c044a,42085,1732390744553, sessionid=0x1016934ed750000, setting cluster-up flag (Was=false) 2024-11-23T19:39:05,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:05,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:05,098 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T19:39:05,100 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=387b213c044a,42085,1732390744553 2024-11-23T19:39:05,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:05,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:05,151 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T19:39:05,152 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=387b213c044a,42085,1732390744553 2024-11-23T19:39:05,153 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-23T19:39:05,155 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-23T19:39:05,155 INFO [master/387b213c044a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-23T19:39:05,155 INFO [master/387b213c044a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-23T19:39:05,155 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 387b213c044a,42085,1732390744553 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T19:39:05,156 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:39:05,156 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:39:05,157 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:39:05,157 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:39:05,157 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/387b213c044a:0, corePoolSize=10, maxPoolSize=10 2024-11-23T19:39:05,157 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:05,157 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/387b213c044a:0, corePoolSize=2, maxPoolSize=2 2024-11-23T19:39:05,157 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/387b213c044a:0, corePoolSize=1, 
maxPoolSize=1 2024-11-23T19:39:05,164 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732390775164 2024-11-23T19:39:05,165 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T19:39:05,165 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T19:39:05,165 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T19:39:05,165 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T19:39:05,165 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T19:39:05,165 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T19:39:05,165 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:39:05,165 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-23T19:39:05,167 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:05,167 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T19:39:05,168 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:05,171 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T19:39:05,171 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T19:39:05,171 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T19:39:05,172 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T19:39:05,172 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T19:39:05,175 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390745172,5,FailOnTimeoutGroup] 2024-11-23T19:39:05,176 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390745175,5,FailOnTimeoutGroup] 2024-11-23T19:39:05,176 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:05,176 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T19:39:05,176 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:05,176 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-23T19:39:05,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741831_1007 (size=1321) 2024-11-23T19:39:05,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741831_1007 (size=1321) 2024-11-23T19:39:05,201 INFO [RS:0;387b213c044a:41609 {}] regionserver.HRegionServer(746): ClusterId : f99583c9-4d00-44b6-8308-d4248d548751 2024-11-23T19:39:05,201 DEBUG [RS:0;387b213c044a:41609 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T19:39:05,211 DEBUG [RS:0;387b213c044a:41609 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T19:39:05,211 DEBUG [RS:0;387b213c044a:41609 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T19:39:05,222 DEBUG [RS:0;387b213c044a:41609 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T19:39:05,223 DEBUG [RS:0;387b213c044a:41609 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@583b05e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=387b213c044a/172.17.0.3:0 2024-11-23T19:39:05,240 DEBUG [RS:0;387b213c044a:41609 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;387b213c044a:41609 2024-11-23T19:39:05,240 INFO [RS:0;387b213c044a:41609 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T19:39:05,240 INFO [RS:0;387b213c044a:41609 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T19:39:05,240 DEBUG [RS:0;387b213c044a:41609 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-23T19:39:05,241 INFO [RS:0;387b213c044a:41609 {}] regionserver.HRegionServer(2659): reportForDuty to master=387b213c044a,42085,1732390744553 with port=41609, startcode=1732390744769 2024-11-23T19:39:05,242 DEBUG [RS:0;387b213c044a:41609 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T19:39:05,249 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55995, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T19:39:05,250 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42085 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 387b213c044a,41609,1732390744769 2024-11-23T19:39:05,250 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42085 {}] master.ServerManager(517): Registering regionserver=387b213c044a,41609,1732390744769 2024-11-23T19:39:05,252 DEBUG [RS:0;387b213c044a:41609 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98 2024-11-23T19:39:05,252 DEBUG [RS:0;387b213c044a:41609 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41669 2024-11-23T19:39:05,252 DEBUG [RS:0;387b213c044a:41609 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T19:39:05,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T19:39:05,263 DEBUG [RS:0;387b213c044a:41609 {}] zookeeper.ZKUtil(111): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/387b213c044a,41609,1732390744769 2024-11-23T19:39:05,263 WARN [RS:0;387b213c044a:41609 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T19:39:05,263 INFO [RS:0;387b213c044a:41609 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:39:05,264 DEBUG [RS:0;387b213c044a:41609 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/WALs/387b213c044a,41609,1732390744769 2024-11-23T19:39:05,264 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [387b213c044a,41609,1732390744769] 2024-11-23T19:39:05,276 INFO [RS:0;387b213c044a:41609 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T19:39:05,284 INFO [RS:0;387b213c044a:41609 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T19:39:05,285 INFO [RS:0;387b213c044a:41609 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T19:39:05,285 INFO [RS:0;387b213c044a:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-23T19:39:05,288 INFO [RS:0;387b213c044a:41609 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T19:39:05,289 INFO [RS:0;387b213c044a:41609 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T19:39:05,289 INFO [RS:0;387b213c044a:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:05,289 DEBUG [RS:0;387b213c044a:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:05,289 DEBUG [RS:0;387b213c044a:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:05,289 DEBUG [RS:0;387b213c044a:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:05,289 DEBUG [RS:0;387b213c044a:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:05,290 DEBUG [RS:0;387b213c044a:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:05,290 DEBUG [RS:0;387b213c044a:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/387b213c044a:0, corePoolSize=2, maxPoolSize=2 2024-11-23T19:39:05,290 DEBUG [RS:0;387b213c044a:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:05,290 DEBUG [RS:0;387b213c044a:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:05,290 DEBUG [RS:0;387b213c044a:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:05,290 DEBUG [RS:0;387b213c044a:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:05,290 DEBUG [RS:0;387b213c044a:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:05,290 DEBUG [RS:0;387b213c044a:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:05,290 DEBUG [RS:0;387b213c044a:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/387b213c044a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T19:39:05,290 DEBUG [RS:0;387b213c044a:41609 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T19:39:05,296 INFO [RS:0;387b213c044a:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-23T19:39:05,296 INFO [RS:0;387b213c044a:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:05,296 INFO [RS:0;387b213c044a:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:05,296 INFO [RS:0;387b213c044a:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:05,296 INFO [RS:0;387b213c044a:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:05,296 INFO [RS:0;387b213c044a:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,41609,1732390744769-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T19:39:05,317 INFO [RS:0;387b213c044a:41609 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T19:39:05,318 INFO [RS:0;387b213c044a:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,41609,1732390744769-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:05,318 INFO [RS:0;387b213c044a:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:05,318 INFO [RS:0;387b213c044a:41609 {}] regionserver.Replication(171): 387b213c044a,41609,1732390744769 started 2024-11-23T19:39:05,334 INFO [RS:0;387b213c044a:41609 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:05,334 INFO [RS:0;387b213c044a:41609 {}] regionserver.HRegionServer(1482): Serving as 387b213c044a,41609,1732390744769, RpcServer on 387b213c044a/172.17.0.3:41609, sessionid=0x1016934ed750001 2024-11-23T19:39:05,335 DEBUG [RS:0;387b213c044a:41609 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T19:39:05,335 DEBUG [RS:0;387b213c044a:41609 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 387b213c044a,41609,1732390744769 2024-11-23T19:39:05,335 DEBUG [RS:0;387b213c044a:41609 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '387b213c044a,41609,1732390744769' 2024-11-23T19:39:05,335 DEBUG [RS:0;387b213c044a:41609 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T19:39:05,335 DEBUG [RS:0;387b213c044a:41609 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T19:39:05,336 DEBUG [RS:0;387b213c044a:41609 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T19:39:05,336 DEBUG [RS:0;387b213c044a:41609 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T19:39:05,336 DEBUG [RS:0;387b213c044a:41609 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 387b213c044a,41609,1732390744769 2024-11-23T19:39:05,336 DEBUG [RS:0;387b213c044a:41609 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '387b213c044a,41609,1732390744769' 2024-11-23T19:39:05,336 DEBUG [RS:0;387b213c044a:41609 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T19:39:05,336 DEBUG 
[RS:0;387b213c044a:41609 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T19:39:05,336 DEBUG [RS:0;387b213c044a:41609 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T19:39:05,336 INFO [RS:0;387b213c044a:41609 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T19:39:05,336 INFO [RS:0;387b213c044a:41609 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T19:39:05,439 INFO [RS:0;387b213c044a:41609 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C41609%2C1732390744769, suffix=, logDir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/WALs/387b213c044a,41609,1732390744769, archiveDir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/oldWALs, maxLogs=32 2024-11-23T19:39:05,439 INFO [RS:0;387b213c044a:41609 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C41609%2C1732390744769.1732390745439 2024-11-23T19:39:05,456 INFO [RS:0;387b213c044a:41609 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/WALs/387b213c044a,41609,1732390744769/387b213c044a%2C41609%2C1732390744769.1732390745439 2024-11-23T19:39:05,457 DEBUG [RS:0;387b213c044a:41609 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43601:43601),(127.0.0.1/127.0.0.1:41403:41403)] 2024-11-23T19:39:05,587 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-23T19:39:05,587 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
regionDir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98
2024-11-23T19:39:05,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-23T19:39:05,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-23T19:39:05,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741833_1009 (size=32)
2024-11-23T19:39:05,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741833_1009 (size=32)
2024-11-23T19:39:05,603 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-23T19:39:05,604 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-23T19:39:05,606 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-23T19:39:05,606 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:05,606 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:39:05,607 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T19:39:05,608 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T19:39:05,608 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:05,609 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:39:05,609 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T19:39:05,610 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T19:39:05,610 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:05,611 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:39:05,611 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T19:39:05,612 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T19:39:05,612 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:05,613 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:39:05,613 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T19:39:05,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740 2024-11-23T19:39:05,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740 2024-11-23T19:39:05,615 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T19:39:05,615 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T19:39:05,616 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-23T19:39:05,617 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T19:39:05,619 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T19:39:05,619 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=875616, jitterRate=0.11340416967868805}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T19:39:05,620 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732390745603Initializing all the Stores at 1732390745604 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390745604Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390745604Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390745604Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390745604Cleaning up temporary data from old regions at 1732390745615 (+11 ms)Region opened successfully at 1732390745620 (+5 ms) 2024-11-23T19:39:05,620 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T19:39:05,620 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T19:39:05,620 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T19:39:05,620 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T19:39:05,620 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T19:39:05,623 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T19:39:05,623 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732390745620Disabling compacts and flushes for region at 1732390745620Disabling writes for close at 1732390745620Writing region close 
event to WAL at 1732390745623 (+3 ms)Closed at 1732390745623 2024-11-23T19:39:05,625 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:39:05,625 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-23T19:39:05,626 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T19:39:05,627 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T19:39:05,629 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T19:39:05,779 DEBUG [387b213c044a:42085 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-23T19:39:05,780 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=387b213c044a,41609,1732390744769 2024-11-23T19:39:05,781 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 387b213c044a,41609,1732390744769, state=OPENING 2024-11-23T19:39:05,824 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T19:39:05,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:05,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:05,836 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:39:05,836 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:39:05,836 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T19:39:05,836 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=387b213c044a,41609,1732390744769}] 2024-11-23T19:39:05,993 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T19:39:05,995 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59749, version=3.0.0-beta-2-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T19:39:05,999 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T19:39:05,999 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:39:06,001 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C41609%2C1732390744769.meta, suffix=.meta, logDir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/WALs/387b213c044a,41609,1732390744769, archiveDir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/oldWALs, maxLogs=32 2024-11-23T19:39:06,002 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C41609%2C1732390744769.meta.1732390746002.meta 2024-11-23T19:39:06,020 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/WALs/387b213c044a,41609,1732390744769/387b213c044a%2C41609%2C1732390744769.meta.1732390746002.meta 2024-11-23T19:39:06,020 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43601:43601),(127.0.0.1/127.0.0.1:41403:41403)] 2024-11-23T19:39:06,021 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T19:39:06,021 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T19:39:06,021 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T19:39:06,022 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-23T19:39:06,022 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T19:39:06,022 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:39:06,022 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T19:39:06,022 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T19:39:06,023 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T19:39:06,023 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T19:39:06,023 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:06,024 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:39:06,024 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T19:39:06,025 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T19:39:06,025 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:06,025 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:39:06,025 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T19:39:06,026 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T19:39:06,026 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:06,026 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:39:06,026 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T19:39:06,027 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T19:39:06,027 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:06,027 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-23T19:39:06,027 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T19:39:06,028 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740 2024-11-23T19:39:06,030 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740 2024-11-23T19:39:06,036 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T19:39:06,036 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T19:39:06,036 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T19:39:06,037 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T19:39:06,038 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=705006, jitterRate=-0.1035396158695221}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T19:39:06,038 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T19:39:06,039 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732390746022Writing region info on filesystem at 1732390746022Initializing all the Stores at 1732390746022Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390746023 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390746023Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390746023Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390746023Cleaning up temporary data from old regions at 1732390746036 (+13 ms)Running coprocessor post-open hooks at 1732390746038 (+2 ms)Region opened successfully at 1732390746039 (+1 ms) 2024-11-23T19:39:06,039 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732390745993 2024-11-23T19:39:06,042 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T19:39:06,042 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T19:39:06,043 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=387b213c044a,41609,1732390744769 2024-11-23T19:39:06,044 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 387b213c044a,41609,1732390744769, state=OPEN 2024-11-23T19:39:06,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T19:39:06,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T19:39:06,106 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=387b213c044a,41609,1732390744769 2024-11-23T19:39:06,106 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:39:06,107 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:39:06,110 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T19:39:06,110 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=387b213c044a,41609,1732390744769 in 270 msec 2024-11-23T19:39:06,112 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T19:39:06,112 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 484 msec 2024-11-23T19:39:06,112 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:39:06,112 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-23T19:39:06,113 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T19:39:06,114 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=387b213c044a,41609,1732390744769, seqNum=-1] 2024-11-23T19:39:06,114 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T19:39:06,115 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51321, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T19:39:06,120 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 964 msec 2024-11-23T19:39:06,120 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732390746120, completionTime=-1 2024-11-23T19:39:06,120 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-23T19:39:06,120 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-23T19:39:06,122 INFO [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-23T19:39:06,122 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732390806122 2024-11-23T19:39:06,122 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732390866122 2024-11-23T19:39:06,122 INFO [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-23T19:39:06,122 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,42085,1732390744553-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:06,123 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,42085,1732390744553-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:06,123 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,42085,1732390744553-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:06,123 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-387b213c044a:42085, period=300000, unit=MILLISECONDS is enabled. 
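The entries above show the master publishing the freshly opened hbase:meta location to ZooKeeper (MetaTableLocator under baseZNode=/hbase) and both ZKWatcher instances then receiving the NodeDataChanged event for /hbase/meta-region-server, which MetaRegionLocationCache reacts to. As a rough illustration of that watch mechanism only, and not of HBase's own MetaTableLocator code, the Java sketch below sets a one-shot data watch on the same znode with the plain ZooKeeper client. The quorum address 127.0.0.1:63840 and the znode path are taken from the log; the class name is just for illustration, and the payload is left unparsed because it uses HBase's own protobuf-based encoding, so the sketch only reports its size.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public final class WatchMetaLocation {
      public static void main(String[] args) throws Exception {
        final String path = "/hbase/meta-region-server";   // baseZNode=/hbase + child seen in the ZKWatcher lines
        final CountDownLatch connected = new CountDownLatch(1);
        final CountDownLatch changed = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:63840", 30_000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();                          // session established
          }
          // The same NodeDataChanged event that ZKWatcher(609) logs above.
          if (event.getType() == Watcher.Event.EventType.NodeDataChanged
              && path.equals(event.getPath())) {
            changed.countDown();
          }
        });
        connected.await();
        byte[] data = zk.getData(path, true, null);          // 'true' registers a one-shot data watch on the znode
        System.out.println("meta-region-server znode holds " + data.length + " bytes");
        changed.await();                                      // returns once the master rewrites the location
        zk.close();
      }
    }

The MetaRegionLocationCache lines above are HBase doing essentially the same thing internally: re-reading the znode each time the watch fires.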
2024-11-23T19:39:06,123 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:06,123 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:06,125 DEBUG [master/387b213c044a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T19:39:06,127 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.281sec 2024-11-23T19:39:06,127 INFO [master/387b213c044a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T19:39:06,127 INFO [master/387b213c044a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T19:39:06,127 INFO [master/387b213c044a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T19:39:06,127 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-23T19:39:06,127 INFO [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T19:39:06,127 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,42085,1732390744553-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T19:39:06,127 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,42085,1732390744553-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T19:39:06,130 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-23T19:39:06,130 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T19:39:06,130 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,42085,1732390744553-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
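Once initialization finishes ("Master has completed initialization 1.281sec"), the master registers its periodic maintenance tasks with a ChoreService: BalancerChore, RegionNormalizerChore, CatalogJanitor, HbckChore, the MOB chores, and so on, each logged by ChoreService(168) with its period. For readers unfamiliar with that pattern, here is a minimal, self-contained sketch of a ScheduledChore. Note that ScheduledChore and ChoreService are HBase-internal classes, so the exact constructor signatures used below are assumptions based on recent HBase releases rather than a stable public API, and the class and chore names are just for illustration.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public final class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Minimal Stoppable owner; in the log above the owner is the HMaster itself.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("sketch");
        // Scheduling a chore is what produces the "Chore ScheduledChore name=..., period=... is enabled." lines.
        service.scheduleChore(new ScheduledChore("DemoChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("periodic work goes here");   // runs every 1000 ms until stopped
          }
        });
        Thread.sleep(3_000);                                  // let the chore fire a few times
        stopper.stop("done");
        service.shutdown();
      }
    }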
2024-11-23T19:39:06,202 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fd98a54, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:39:06,202 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 387b213c044a,42085,-1 for getting cluster id 2024-11-23T19:39:06,202 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T19:39:06,203 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f99583c9-4d00-44b6-8308-d4248d548751' 2024-11-23T19:39:06,204 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T19:39:06,204 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f99583c9-4d00-44b6-8308-d4248d548751" 2024-11-23T19:39:06,204 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4488f2e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:39:06,204 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [387b213c044a,42085,-1] 2024-11-23T19:39:06,204 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T19:39:06,204 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:39:06,205 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53768, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T19:39:06,206 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d09ac1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:39:06,206 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T19:39:06,207 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=387b213c044a,41609,1732390744769, seqNum=-1] 2024-11-23T19:39:06,208 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T19:39:06,209 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38260, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T19:39:06,211 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=387b213c044a,42085,1732390744553 2024-11-23T19:39:06,211 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:39:06,213 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-23T19:39:06,214 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T19:39:06,215 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 387b213c044a,42085,1732390744553 2024-11-23T19:39:06,215 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@65cb8af6 2024-11-23T19:39:06,215 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T19:39:06,216 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53782, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T19:39:06,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42085 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-23T19:39:06,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42085 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-23T19:39:06,217 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42085 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T19:39:06,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42085 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-23T19:39:06,219 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T19:39:06,219 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:06,219 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42085 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-23T19:39:06,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42085 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T19:39:06,220 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T19:39:06,229 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741835_1011 (size=381) 2024-11-23T19:39:06,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741835_1011 (size=381) 2024-11-23T19:39:06,231 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bd1ea0efe9f4783ac621c9c9a4216151, NAME => 'TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98 2024-11-23T19:39:06,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741836_1012 (size=64) 2024-11-23T19:39:06,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741836_1012 (size=64) 2024-11-23T19:39:06,242 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:39:06,243 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing bd1ea0efe9f4783ac621c9c9a4216151, disabling compactions & flushes 2024-11-23T19:39:06,243 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151. 2024-11-23T19:39:06,243 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151. 2024-11-23T19:39:06,243 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151. after waiting 0 ms 2024-11-23T19:39:06,243 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151. 2024-11-23T19:39:06,243 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151. 
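The table creation captured above ("Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRolling', {NAME => 'info', ...}") is what kicks off CreateTableProcedure pid=4 and the region pre-create/close cycle that follows. A client-side sketch of an equivalent request is below; the table name, family name, version count, bloom filter type, and 64 KB block size come straight from the logged descriptor, while the class name, configuration source, and running cluster are assumptions. The two TableDescriptorChecker warnings are expected here: the test shrinks hbase.hregion.max.filesize to 786432 and hbase.hregion.memstore.flush.size to 8192, presumably so flushes and splits happen quickly, and (assuming the split policy applies its jitter multiplicatively) 786432 × (1 + 0.0089907) ≈ 793502, which matches the desiredMaxFileSize reported when the region opens further down.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateTestLogRollingTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();      // assumes hbase-site.xml points at the test cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Mirrors the descriptor printed by HMaster above: one 'info' family,
          // VERSIONS => 1, BLOOMFILTER => ROW, BLOCKSIZE => 65536, no compression.
          TableDescriptorBuilder table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)
                  .setBloomFilterType(BloomType.ROW)
                  .setBlocksize(64 * 1024)
                  .build());
          admin.createTable(table.build());                     // blocks until CreateTableProcedure (pid=4 above) finishes
        }
      }
    }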
2024-11-23T19:39:06,243 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for bd1ea0efe9f4783ac621c9c9a4216151: Waiting for close lock at 1732390746242Disabling compacts and flushes for region at 1732390746242Disabling writes for close at 1732390746243 (+1 ms)Writing region close event to WAL at 1732390746243Closed at 1732390746243 2024-11-23T19:39:06,244 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T19:39:06,245 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732390746244"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732390746244"}]},"ts":"1732390746244"} 2024-11-23T19:39:06,247 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-23T19:39:06,248 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T19:39:06,248 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732390746248"}]},"ts":"1732390746248"} 2024-11-23T19:39:06,251 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-23T19:39:06,251 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bd1ea0efe9f4783ac621c9c9a4216151, ASSIGN}] 2024-11-23T19:39:06,253 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bd1ea0efe9f4783ac621c9c9a4216151, ASSIGN 2024-11-23T19:39:06,254 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bd1ea0efe9f4783ac621c9c9a4216151, ASSIGN; state=OFFLINE, location=387b213c044a,41609,1732390744769; forceNewPlan=false, retain=false 2024-11-23T19:39:06,404 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bd1ea0efe9f4783ac621c9c9a4216151, regionState=OPENING, regionLocation=387b213c044a,41609,1732390744769 2024-11-23T19:39:06,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bd1ea0efe9f4783ac621c9c9a4216151, ASSIGN because future has completed 2024-11-23T19:39:06,407 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bd1ea0efe9f4783ac621c9c9a4216151, 
server=387b213c044a,41609,1732390744769}] 2024-11-23T19:39:06,564 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151. 2024-11-23T19:39:06,565 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => bd1ea0efe9f4783ac621c9c9a4216151, NAME => 'TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.', STARTKEY => '', ENDKEY => ''} 2024-11-23T19:39:06,565 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:06,565 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:39:06,565 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:06,565 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:06,566 INFO [StoreOpener-bd1ea0efe9f4783ac621c9c9a4216151-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:06,568 INFO [StoreOpener-bd1ea0efe9f4783ac621c9c9a4216151-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bd1ea0efe9f4783ac621c9c9a4216151 columnFamilyName info 2024-11-23T19:39:06,568 DEBUG [StoreOpener-bd1ea0efe9f4783ac621c9c9a4216151-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:06,568 INFO [StoreOpener-bd1ea0efe9f4783ac621c9c9a4216151-1 {}] regionserver.HStore(327): Store=bd1ea0efe9f4783ac621c9c9a4216151/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:39:06,569 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:06,569 DEBUG 
[RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:06,570 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:06,570 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:06,570 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:06,571 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:06,574 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T19:39:06,574 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened bd1ea0efe9f4783ac621c9c9a4216151; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=793502, jitterRate=0.008990690112113953}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T19:39:06,574 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:06,575 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for bd1ea0efe9f4783ac621c9c9a4216151: Running coprocessor pre-open hook at 1732390746565Writing region info on filesystem at 1732390746565Initializing all the Stores at 1732390746566 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390746566Cleaning up temporary data from old regions at 1732390746570 (+4 ms)Running coprocessor post-open hooks at 1732390746574 (+4 ms)Region opened successfully at 1732390746575 (+1 ms) 2024-11-23T19:39:06,576 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151., pid=6, masterSystemTime=1732390746560 2024-11-23T19:39:06,578 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151. 2024-11-23T19:39:06,578 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151. 2024-11-23T19:39:06,579 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bd1ea0efe9f4783ac621c9c9a4216151, regionState=OPEN, openSeqNum=2, regionLocation=387b213c044a,41609,1732390744769 2024-11-23T19:39:06,581 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bd1ea0efe9f4783ac621c9c9a4216151, server=387b213c044a,41609,1732390744769 because future has completed 2024-11-23T19:39:06,585 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-23T19:39:06,585 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure bd1ea0efe9f4783ac621c9c9a4216151, server=387b213c044a,41609,1732390744769 in 176 msec 2024-11-23T19:39:06,587 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-23T19:39:06,587 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bd1ea0efe9f4783ac621c9c9a4216151, ASSIGN in 334 msec 2024-11-23T19:39:06,587 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T19:39:06,588 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732390746587"}]},"ts":"1732390746587"} 2024-11-23T19:39:06,590 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-23T19:39:06,591 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T19:39:06,592 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 374 msec 2024-11-23T19:39:06,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:06,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:06,767 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:06,767 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:06,768 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:06,768 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:06,768 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:06,768 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:06,769 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:06,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:06,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:06,794 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:06,794 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:06,794 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:06,794 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:06,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:06,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:06,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:06,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:07,309 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T19:39:07,309 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:07,310 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:07,310 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:07,310 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:07,310 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:07,310 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:07,311 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:07,339 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:07,339 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:07,339 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:07,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:07,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:07,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:07,346 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:07,346 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:07,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:07,350 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:07,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:07,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:08,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:08,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:09,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-23T19:39:09,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327
2024-11-23T19:39:10,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta
2024-11-23T19:39:10,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327
2024-11-23T19:39:11,276 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-23T19:39:11,277 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling'
2024-11-23T19:39:11,605 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327
2024-11-23T19:39:11,605 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta
2024-11-23T19:39:12,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327
2024-11-23T19:39:12,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta
2024-11-23T19:39:13,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327
2024-11-23T19:39:13,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta
2024-11-23T19:39:14,534 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-23T19:39:14,534 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-23T19:39:14,535 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-23T19:39:14,535 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-23T19:39:14,535 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-23T19:39:14,535 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-23T19:39:14,536 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling
2024-11-23T19:39:14,536 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer
2024-11-23T19:39:14,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327
2024-11-23T19:39:14,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta
2024-11-23T19:39:15,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta
2024-11-23T19:39:15,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327
2024-11-23T19:39:16,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42085 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-23T19:39:16,228 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-11-23T19:39:16,228 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100
2024-11-23T19:39:16,230 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-11-23T19:39:16,230 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.
2024-11-23T19:39:16,232 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151., hostname=387b213c044a,41609,1732390744769, seqNum=2]
2024-11-23T19:39:16,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on bd1ea0efe9f4783ac621c9c9a4216151
2024-11-23T19:39:16,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bd1ea0efe9f4783ac621c9c9a4216151 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-23T19:39:16,261 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/0776b58665134f679fa8a95201642c19 is 1080, key is row0001/info:/1732390756233/Put/seqid=0
2024-11-23T19:39:16,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741837_1013 (size=12509)
2024-11-23T19:39:16,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741837_1013 (size=12509)
2024-11-23T19:39:16,269 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/0776b58665134f679fa8a95201642c19
2024-11-23T19:39:16,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/0776b58665134f679fa8a95201642c19 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/0776b58665134f679fa8a95201642c19
2024-11-23T19:39:16,281 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/0776b58665134f679fa8a95201642c19, entries=7, sequenceid=11, filesize=12.2 K
2024-11-23T19:39:16,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for bd1ea0efe9f4783ac621c9c9a4216151 in 38ms, sequenceid=11, compaction requested=false
2024-11-23T19:39:16,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bd1ea0efe9f4783ac621c9c9a4216151:
2024-11-23T19:39:16,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on bd1ea0efe9f4783ac621c9c9a4216151
2024-11-23T19:39:16,283 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bd1ea0efe9f4783ac621c9c9a4216151 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB
2024-11-23T19:39:16,287 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/f45ce4970f2f4591bbe73c35eef97186 is 1080, key is row0008/info:/1732390756245/Put/seqid=0
2024-11-23T19:39:16,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741838_1014 (size=25453)
2024-11-23T19:39:16,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741838_1014 (size=25453)
2024-11-23T19:39:16,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/f45ce4970f2f4591bbe73c35eef97186
2024-11-23T19:39:16,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/f45ce4970f2f4591bbe73c35eef97186 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/f45ce4970f2f4591bbe73c35eef97186
2024-11-23T19:39:16,303 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/f45ce4970f2f4591bbe73c35eef97186, entries=19, sequenceid=33, filesize=24.9 K
2024-11-23T19:39:16,304 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=6.30 KB/6456 for bd1ea0efe9f4783ac621c9c9a4216151 in 21ms, sequenceid=33, compaction requested=false
2024-11-23T19:39:16,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bd1ea0efe9f4783ac621c9c9a4216151:
2024-11-23T19:39:16,304 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=37.1 K, sizeToCheck=16.0 K
2024-11-23T19:39:16,304 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-23T19:39:16,304 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/f45ce4970f2f4591bbe73c35eef97186 because midkey is the same as first or last row
2024-11-23T19:39:16,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
2024-11-23T19:39:16,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327
2024-11-23T19:39:17,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327
2024-11-23T19:39:17,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta
2024-11-23T19:39:18,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on bd1ea0efe9f4783ac621c9c9a4216151
2024-11-23T19:39:18,302 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bd1ea0efe9f4783ac621c9c9a4216151 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-23T19:39:18,309 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/6ed020b815ae45bbb61677bd78a33f7c is 1080, key is row0027/info:/1732390756284/Put/seqid=0
2024-11-23T19:39:18,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741839_1015 (size=12509)
2024-11-23T19:39:18,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741839_1015 (size=12509)
2024-11-23T19:39:18,315 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/6ed020b815ae45bbb61677bd78a33f7c
2024-11-23T19:39:18,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/6ed020b815ae45bbb61677bd78a33f7c as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/6ed020b815ae45bbb61677bd78a33f7c
2024-11-23T19:39:18,328 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/6ed020b815ae45bbb61677bd78a33f7c, entries=7, sequenceid=43, filesize=12.2 K
2024-11-23T19:39:18,329 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for bd1ea0efe9f4783ac621c9c9a4216151 in 28ms, sequenceid=43, compaction requested=true
2024-11-23T19:39:18,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bd1ea0efe9f4783ac621c9c9a4216151:
2024-11-23T19:39:18,329 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=49.3 K, sizeToCheck=16.0 K
2024-11-23T19:39:18,330 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-23T19:39:18,330 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/f45ce4970f2f4591bbe73c35eef97186 because midkey is the same as first or last row
2024-11-23T19:39:18,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bd1ea0efe9f4783ac621c9c9a4216151:info, priority=-2147483648, current under compaction store size is 1
2024-11-23T19:39:18,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-23T19:39:18,330 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-23T19:39:18,331 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 50471 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-23T19:39:18,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on bd1ea0efe9f4783ac621c9c9a4216151
2024-11-23T19:39:18,331 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1541): bd1ea0efe9f4783ac621c9c9a4216151/info is initiating minor compaction (all files)
2024-11-23T19:39:18,331 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bd1ea0efe9f4783ac621c9c9a4216151/info in TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.
2024-11-23T19:39:18,331 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bd1ea0efe9f4783ac621c9c9a4216151 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-11-23T19:39:18,332 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/0776b58665134f679fa8a95201642c19, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/f45ce4970f2f4591bbe73c35eef97186, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/6ed020b815ae45bbb61677bd78a33f7c] into tmpdir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp, totalSize=49.3 K
2024-11-23T19:39:18,332 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0776b58665134f679fa8a95201642c19, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732390756233
2024-11-23T19:39:18,332 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting f45ce4970f2f4591bbe73c35eef97186, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=33, earliestPutTs=1732390756245
2024-11-23T19:39:18,333 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6ed020b815ae45bbb61677bd78a33f7c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1732390756284
2024-11-23T19:39:18,336 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/414a91dfd834414abf058765256bcc8e is 1080, key is row0034/info:/1732390758304/Put/seqid=0
2024-11-23T19:39:18,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741840_1016 (size=17894)
2024-11-23T19:39:18,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741840_1016 (size=17894)
2024-11-23T19:39:18,348 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=58 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/414a91dfd834414abf058765256bcc8e
2024-11-23T19:39:18,351 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bd1ea0efe9f4783ac621c9c9a4216151#info#compaction#60 average throughput is 11.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-23T19:39:18,351 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/912f2b61c15f489589cf2da3e4a7d2b1 is 1080, key is row0001/info:/1732390756233/Put/seqid=0
2024-11-23T19:39:18,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/414a91dfd834414abf058765256bcc8e as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/414a91dfd834414abf058765256bcc8e
2024-11-23T19:39:18,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741841_1017 (size=40670)
2024-11-23T19:39:18,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741841_1017 (size=40670)
2024-11-23T19:39:18,362 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/414a91dfd834414abf058765256bcc8e, entries=12, sequenceid=58, filesize=17.5 K
2024-11-23T19:39:18,362 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=13.66 KB/13988 for bd1ea0efe9f4783ac621c9c9a4216151 in 31ms, sequenceid=58, compaction requested=false
2024-11-23T19:39:18,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bd1ea0efe9f4783ac621c9c9a4216151:
2024-11-23T19:39:18,363 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.8 K, sizeToCheck=16.0 K
2024-11-23T19:39:18,363 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-23T19:39:18,363 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/f45ce4970f2f4591bbe73c35eef97186 because midkey is the same as first or last row
2024-11-23T19:39:18,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on bd1ea0efe9f4783ac621c9c9a4216151
2024-11-23T19:39:18,364 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bd1ea0efe9f4783ac621c9c9a4216151 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-23T19:39:18,366 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/912f2b61c15f489589cf2da3e4a7d2b1 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/912f2b61c15f489589cf2da3e4a7d2b1
2024-11-23T19:39:18,369 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/7347fe1ce88b4e5fba45aa8099639c00 is 1080, key is row0046/info:/1732390758333/Put/seqid=0
2024-11-23T19:39:18,372 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bd1ea0efe9f4783ac621c9c9a4216151/info of bd1ea0efe9f4783ac621c9c9a4216151 into 912f2b61c15f489589cf2da3e4a7d2b1(size=39.7 K), total size for store is 57.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-23T19:39:18,373 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bd1ea0efe9f4783ac621c9c9a4216151:
2024-11-23T19:39:18,373 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151., storeName=bd1ea0efe9f4783ac621c9c9a4216151/info, priority=13, startTime=1732390758330; duration=0sec
2024-11-23T19:39:18,373 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=57.2 K, sizeToCheck=16.0 K
2024-11-23T19:39:18,373 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-23T19:39:18,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741842_1018 (size=20064)
2024-11-23T19:39:18,373 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/912f2b61c15f489589cf2da3e4a7d2b1 because midkey is the same as first or last row
2024-11-23T19:39:18,373 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=57.2 K, sizeToCheck=16.0 K
2024-11-23T19:39:18,373 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-23T19:39:18,373 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/912f2b61c15f489589cf2da3e4a7d2b1 because midkey is the same as first or last row
2024-11-23T19:39:18,373 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=57.2 K, sizeToCheck=16.0 K
2024-11-23T19:39:18,373 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-23T19:39:18,374 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/912f2b61c15f489589cf2da3e4a7d2b1 because midkey is the same as first or last row
2024-11-23T19:39:18,374 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-23T19:39:18,374 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bd1ea0efe9f4783ac621c9c9a4216151:info
2024-11-23T19:39:18,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741842_1018 (size=20064)
2024-11-23T19:39:18,376 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/7347fe1ce88b4e5fba45aa8099639c00
2024-11-23T19:39:18,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/7347fe1ce88b4e5fba45aa8099639c00 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/7347fe1ce88b4e5fba45aa8099639c00
2024-11-23T19:39:18,386 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/7347fe1ce88b4e5fba45aa8099639c00, entries=14, sequenceid=75, filesize=19.6 K
2024-11-23T19:39:18,387 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=5.25 KB/5380 for bd1ea0efe9f4783ac621c9c9a4216151 in 23ms, sequenceid=75, compaction requested=true
2024-11-23T19:39:18,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bd1ea0efe9f4783ac621c9c9a4216151:
2024-11-23T19:39:18,387 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=76.8 K, sizeToCheck=16.0 K
2024-11-23T19:39:18,387 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-23T19:39:18,387 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/912f2b61c15f489589cf2da3e4a7d2b1 because midkey is the same as first or last row
2024-11-23T19:39:18,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bd1ea0efe9f4783ac621c9c9a4216151:info, priority=-2147483648, current under compaction store size is 1
2024-11-23T19:39:18,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-23T19:39:18,387 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-23T19:39:18,388 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 78628 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-23T19:39:18,388 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1541): bd1ea0efe9f4783ac621c9c9a4216151/info is initiating minor compaction (all files)
2024-11-23T19:39:18,388 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bd1ea0efe9f4783ac621c9c9a4216151/info in TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.
2024-11-23T19:39:18,388 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/912f2b61c15f489589cf2da3e4a7d2b1, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/414a91dfd834414abf058765256bcc8e, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/7347fe1ce88b4e5fba45aa8099639c00] into tmpdir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp, totalSize=76.8 K
2024-11-23T19:39:18,389 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 912f2b61c15f489589cf2da3e4a7d2b1, keycount=33, bloomtype=ROW, size=39.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1732390756233
2024-11-23T19:39:18,389 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 414a91dfd834414abf058765256bcc8e, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1732390758304
2024-11-23T19:39:18,389 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7347fe1ce88b4e5fba45aa8099639c00, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732390758333
2024-11-23T19:39:18,402 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bd1ea0efe9f4783ac621c9c9a4216151#info#compaction#62 average throughput is 20.18 MB/second, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T19:39:18,402 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/49f71f04ad884ee4a380fd802d2b7827 is 1080, key is row0001/info:/1732390756233/Put/seqid=0 2024-11-23T19:39:18,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741843_1019 (size=68843) 2024-11-23T19:39:18,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741843_1019 (size=68843) 2024-11-23T19:39:18,414 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/49f71f04ad884ee4a380fd802d2b7827 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/49f71f04ad884ee4a380fd802d2b7827 2024-11-23T19:39:18,421 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bd1ea0efe9f4783ac621c9c9a4216151/info of bd1ea0efe9f4783ac621c9c9a4216151 into 49f71f04ad884ee4a380fd802d2b7827(size=67.2 K), total size for store is 67.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T19:39:18,421 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bd1ea0efe9f4783ac621c9c9a4216151: 2024-11-23T19:39:18,421 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151., storeName=bd1ea0efe9f4783ac621c9c9a4216151/info, priority=13, startTime=1732390758387; duration=0sec 2024-11-23T19:39:18,421 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.2 K, sizeToCheck=16.0 K 2024-11-23T19:39:18,421 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:39:18,422 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/49f71f04ad884ee4a380fd802d2b7827 because midkey is the same as first or last row 2024-11-23T19:39:18,422 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.2 K, sizeToCheck=16.0 K 2024-11-23T19:39:18,422 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:39:18,422 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/49f71f04ad884ee4a380fd802d2b7827 because midkey is the same as first or last row 2024-11-23T19:39:18,422 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.2 K, sizeToCheck=16.0 K 2024-11-23T19:39:18,422 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:39:18,422 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/49f71f04ad884ee4a380fd802d2b7827 because midkey is the same as first or last row 2024-11-23T19:39:18,422 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:18,422 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bd1ea0efe9f4783ac621c9c9a4216151:info 2024-11-23T19:39:18,613 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:18,613 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:19,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:19,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:20,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:20,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bd1ea0efe9f4783ac621c9c9a4216151 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T19:39:20,391 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/b582dd3aba2d48fe9c4a7d16c0df7f02 is 1080, key is row0060/info:/1732390758365/Put/seqid=0 2024-11-23T19:39:20,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741844_1020 (size=12509) 2024-11-23T19:39:20,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741844_1020 (size=12509) 2024-11-23T19:39:20,397 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/b582dd3aba2d48fe9c4a7d16c0df7f02 2024-11-23T19:39:20,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/b582dd3aba2d48fe9c4a7d16c0df7f02 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/b582dd3aba2d48fe9c4a7d16c0df7f02 2024-11-23T19:39:20,408 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/b582dd3aba2d48fe9c4a7d16c0df7f02, entries=7, sequenceid=87, filesize=12.2 K 2024-11-23T19:39:20,409 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for bd1ea0efe9f4783ac621c9c9a4216151 in 25ms, sequenceid=87, compaction requested=false 2024-11-23T19:39:20,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bd1ea0efe9f4783ac621c9c9a4216151: 2024-11-23T19:39:20,409 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=79.4 K, sizeToCheck=16.0 K 2024-11-23T19:39:20,409 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:39:20,409 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/49f71f04ad884ee4a380fd802d2b7827 because midkey is the same as first or last row 2024-11-23T19:39:20,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:20,409 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bd1ea0efe9f4783ac621c9c9a4216151 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-23T19:39:20,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/c6c1881d675d46db96c5ce1e1d96ac9f is 1080, key is row0067/info:/1732390760386/Put/seqid=0 2024-11-23T19:39:20,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741845_1021 (size=16817) 2024-11-23T19:39:20,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741845_1021 (size=16817) 2024-11-23T19:39:20,419 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=101 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/c6c1881d675d46db96c5ce1e1d96ac9f 2024-11-23T19:39:20,424 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/c6c1881d675d46db96c5ce1e1d96ac9f as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/c6c1881d675d46db96c5ce1e1d96ac9f 2024-11-23T19:39:20,430 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/c6c1881d675d46db96c5ce1e1d96ac9f, entries=11, sequenceid=101, filesize=16.4 K 2024-11-23T19:39:20,431 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for bd1ea0efe9f4783ac621c9c9a4216151 in 22ms, sequenceid=101, compaction requested=true 2024-11-23T19:39:20,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bd1ea0efe9f4783ac621c9c9a4216151: 2024-11-23T19:39:20,431 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=95.9 K, sizeToCheck=16.0 K 2024-11-23T19:39:20,431 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:39:20,431 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/49f71f04ad884ee4a380fd802d2b7827 because midkey is the same as first or last row 2024-11-23T19:39:20,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bd1ea0efe9f4783ac621c9c9a4216151:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T19:39:20,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:20,431 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T19:39:20,432 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 98169 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T19:39:20,432 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1541): bd1ea0efe9f4783ac621c9c9a4216151/info is initiating minor compaction (all files) 2024-11-23T19:39:20,432 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bd1ea0efe9f4783ac621c9c9a4216151/info in TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151. 2024-11-23T19:39:20,432 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/49f71f04ad884ee4a380fd802d2b7827, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/b582dd3aba2d48fe9c4a7d16c0df7f02, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/c6c1881d675d46db96c5ce1e1d96ac9f] into tmpdir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp, totalSize=95.9 K 2024-11-23T19:39:20,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:20,433 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing bd1ea0efe9f4783ac621c9c9a4216151 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-23T19:39:20,433 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 49f71f04ad884ee4a380fd802d2b7827, keycount=59, bloomtype=ROW, size=67.2 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732390756233 2024-11-23T19:39:20,433 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting b582dd3aba2d48fe9c4a7d16c0df7f02, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732390758365 2024-11-23T19:39:20,434 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting c6c1881d675d46db96c5ce1e1d96ac9f, keycount=11, bloomtype=ROW, size=16.4 
K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1732390760386 2024-11-23T19:39:20,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/fdc06ef66c764111836cf42ffc20cb56 is 1080, key is row0078/info:/1732390760410/Put/seqid=0 2024-11-23T19:39:20,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741846_1022 (size=17894) 2024-11-23T19:39:20,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741846_1022 (size=17894) 2024-11-23T19:39:20,442 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/fdc06ef66c764111836cf42ffc20cb56 2024-11-23T19:39:20,447 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bd1ea0efe9f4783ac621c9c9a4216151#info#compaction#66 average throughput is 26.34 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T19:39:20,448 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/5c17896b20864efba2cbd43b3dc79c31 is 1080, key is row0001/info:/1732390756233/Put/seqid=0 2024-11-23T19:39:20,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/fdc06ef66c764111836cf42ffc20cb56 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/fdc06ef66c764111836cf42ffc20cb56 2024-11-23T19:39:20,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741847_1023 (size=88408) 2024-11-23T19:39:20,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741847_1023 (size=88408) 2024-11-23T19:39:20,455 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/fdc06ef66c764111836cf42ffc20cb56, entries=12, sequenceid=116, filesize=17.5 K 2024-11-23T19:39:20,456 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=7.36 KB/7532 for bd1ea0efe9f4783ac621c9c9a4216151 in 23ms, sequenceid=116, compaction requested=false 2024-11-23T19:39:20,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for bd1ea0efe9f4783ac621c9c9a4216151: 2024-11-23T19:39:20,456 DEBUG [MemStoreFlusher.0 {}] 
regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=113.3 K, sizeToCheck=16.0 K 2024-11-23T19:39:20,456 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:39:20,456 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/49f71f04ad884ee4a380fd802d2b7827 because midkey is the same as first or last row 2024-11-23T19:39:20,458 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/5c17896b20864efba2cbd43b3dc79c31 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/5c17896b20864efba2cbd43b3dc79c31 2024-11-23T19:39:20,463 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in bd1ea0efe9f4783ac621c9c9a4216151/info of bd1ea0efe9f4783ac621c9c9a4216151 into 5c17896b20864efba2cbd43b3dc79c31(size=86.3 K), total size for store is 103.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T19:39:20,464 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bd1ea0efe9f4783ac621c9c9a4216151: 2024-11-23T19:39:20,464 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151., storeName=bd1ea0efe9f4783ac621c9c9a4216151/info, priority=13, startTime=1732390760431; duration=0sec 2024-11-23T19:39:20,464 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=103.8 K, sizeToCheck=16.0 K 2024-11-23T19:39:20,464 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:39:20,464 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=103.8 K, sizeToCheck=16.0 K 2024-11-23T19:39:20,464 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:39:20,464 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=103.8 K, sizeToCheck=16.0 K 2024-11-23T19:39:20,464 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-23T19:39:20,465 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:20,465 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:20,465 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bd1ea0efe9f4783ac621c9c9a4216151:info 2024-11-23T19:39:20,466 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42085 {}] assignment.AssignmentManager(1363): Split request from 387b213c044a,41609,1732390744769, parent={ENCODED => bd1ea0efe9f4783ac621c9c9a4216151, NAME => 'TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-23T19:39:20,469 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42085 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=387b213c044a,41609,1732390744769 2024-11-23T19:39:20,472 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42085 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bd1ea0efe9f4783ac621c9c9a4216151, daughterA=d94ef5b652f6001b9acee3e178b4f374, daughterB=26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:20,473 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bd1ea0efe9f4783ac621c9c9a4216151, daughterA=d94ef5b652f6001b9acee3e178b4f374, daughterB=26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:20,473 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bd1ea0efe9f4783ac621c9c9a4216151, daughterA=d94ef5b652f6001b9acee3e178b4f374, daughterB=26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:20,473 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bd1ea0efe9f4783ac621c9c9a4216151, daughterA=d94ef5b652f6001b9acee3e178b4f374, daughterB=26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:20,479 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bd1ea0efe9f4783ac621c9c9a4216151, UNASSIGN}] 2024-11-23T19:39:20,481 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bd1ea0efe9f4783ac621c9c9a4216151, UNASSIGN 2024-11-23T19:39:20,483 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=bd1ea0efe9f4783ac621c9c9a4216151, regionState=CLOSING, regionLocation=387b213c044a,41609,1732390744769 2024-11-23T19:39:20,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bd1ea0efe9f4783ac621c9c9a4216151, UNASSIGN because future has completed 2024-11-23T19:39:20,485 DEBUG [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-23T19:39:20,485 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure bd1ea0efe9f4783ac621c9c9a4216151, server=387b213c044a,41609,1732390744769}] 2024-11-23T19:39:20,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:20,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:20,643 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:20,643 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-23T19:39:20,644 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing bd1ea0efe9f4783ac621c9c9a4216151, disabling compactions & flushes 2024-11-23T19:39:20,644 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151. 2024-11-23T19:39:20,644 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151. 2024-11-23T19:39:20,644 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151. after waiting 0 ms 2024-11-23T19:39:20,645 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151. 
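Note on the repeated "Failed invocation" WARNs above: RecoverLeaseFSUtils is reflectively invoking DistributedFileSystem.isFileClosed() on a DFSClient that this test has already shut down, hence the "Filesystem closed" cause. Below is a minimal, hedged sketch of the plain HDFS calls that WAL lease recovery rests on, assuming a reachable NameNode at the hdfs://localhost:39041 address seen in the log; the class name and the fixed one-second poll are illustrative and do not reproduce HBase's actual retry/backoff logic.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Same WAL file that RecoverLeaseFSUtils was working on in the WARNs above.
        Path wal = new Path("hdfs://localhost:39041/user/jenkins/test-data/"
            + "7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/"
            + "387b213c044a%2C37383%2C1732390604084.1732390604327");
        try (FileSystem fs = wal.getFileSystem(conf)) {
          if (fs instanceof DistributedFileSystem) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            // recoverLease() asks the NameNode to reclaim the previous writer's lease;
            // it returns true once the file is closed. isFileClosed() lets us poll.
            boolean closed = dfs.recoverLease(wal);
            while (!closed) {
              Thread.sleep(1000L);          // fixed poll for brevity; HBase uses backoff
              closed = dfs.isFileClosed(wal);
            }
          }
        }
      }
    }
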
2024-11-23T19:39:20,645 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing bd1ea0efe9f4783ac621c9c9a4216151 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T19:39:20,650 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/bf8cf6dd8935491eba3d0449a6964ae2 is 1080, key is row0090/info:/1732390760434/Put/seqid=0 2024-11-23T19:39:20,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741848_1024 (size=12509) 2024-11-23T19:39:20,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741848_1024 (size=12509) 2024-11-23T19:39:20,655 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/bf8cf6dd8935491eba3d0449a6964ae2 2024-11-23T19:39:20,661 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/.tmp/info/bf8cf6dd8935491eba3d0449a6964ae2 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/bf8cf6dd8935491eba3d0449a6964ae2 2024-11-23T19:39:20,666 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/bf8cf6dd8935491eba3d0449a6964ae2, entries=7, sequenceid=127, filesize=12.2 K 2024-11-23T19:39:20,667 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for bd1ea0efe9f4783ac621c9c9a4216151 in 22ms, sequenceid=127, compaction requested=true 2024-11-23T19:39:20,668 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/0776b58665134f679fa8a95201642c19, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/f45ce4970f2f4591bbe73c35eef97186, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/912f2b61c15f489589cf2da3e4a7d2b1, 
hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/6ed020b815ae45bbb61677bd78a33f7c, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/414a91dfd834414abf058765256bcc8e, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/49f71f04ad884ee4a380fd802d2b7827, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/7347fe1ce88b4e5fba45aa8099639c00, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/b582dd3aba2d48fe9c4a7d16c0df7f02, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/c6c1881d675d46db96c5ce1e1d96ac9f] to archive 2024-11-23T19:39:20,669 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T19:39:20,671 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/0776b58665134f679fa8a95201642c19 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/0776b58665134f679fa8a95201642c19 2024-11-23T19:39:20,672 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/f45ce4970f2f4591bbe73c35eef97186 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/f45ce4970f2f4591bbe73c35eef97186 2024-11-23T19:39:20,674 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/912f2b61c15f489589cf2da3e4a7d2b1 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/912f2b61c15f489589cf2da3e4a7d2b1 2024-11-23T19:39:20,675 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/6ed020b815ae45bbb61677bd78a33f7c to 
hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/6ed020b815ae45bbb61677bd78a33f7c 2024-11-23T19:39:20,676 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/414a91dfd834414abf058765256bcc8e to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/414a91dfd834414abf058765256bcc8e 2024-11-23T19:39:20,677 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/49f71f04ad884ee4a380fd802d2b7827 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/49f71f04ad884ee4a380fd802d2b7827 2024-11-23T19:39:20,678 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/7347fe1ce88b4e5fba45aa8099639c00 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/7347fe1ce88b4e5fba45aa8099639c00 2024-11-23T19:39:20,680 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/b582dd3aba2d48fe9c4a7d16c0df7f02 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/b582dd3aba2d48fe9c4a7d16c0df7f02 2024-11-23T19:39:20,682 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/c6c1881d675d46db96c5ce1e1d96ac9f to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/c6c1881d675d46db96c5ce1e1d96ac9f 2024-11-23T19:39:20,690 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1 
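The close, final flush, HFile archiving, and recovered.edits marker above are the parent-region side of the SplitTableRegionProcedure stored earlier for splitKey=row0062. In this run the split was requested automatically by CompactSplit once the store outgrew the split policy's size check (sumSize vs. sizeToCheck=16.0 K, logged repeatedly above), but the same request can be issued explicitly from a client. A minimal sketch using the standard Admin API, assuming an hbase-site.xml for this cluster on the classpath; the class name is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ManualSplitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
          // Ask the master to split the region containing "row0062" at that key;
          // the master then runs a SplitTableRegionProcedure like pid=7 above.
          admin.split(table, Bytes.toBytes("row0062"));
        }
      }
    }
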
2024-11-23T19:39:20,690 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151. 2024-11-23T19:39:20,690 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for bd1ea0efe9f4783ac621c9c9a4216151: Waiting for close lock at 1732390760644Running coprocessor pre-close hooks at 1732390760644Disabling compacts and flushes for region at 1732390760644Disabling writes for close at 1732390760645 (+1 ms)Obtaining lock to block concurrent updates at 1732390760645Preparing flush snapshotting stores in bd1ea0efe9f4783ac621c9c9a4216151 at 1732390760645Finished memstore snapshotting TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151., syncing WAL and waiting on mvcc, flushsize=dataSize=7532, getHeapSize=8304, getOffHeapSize=0, getCellsCount=7 at 1732390760645Flushing stores of TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151. at 1732390760646 (+1 ms)Flushing bd1ea0efe9f4783ac621c9c9a4216151/info: creating writer at 1732390760646Flushing bd1ea0efe9f4783ac621c9c9a4216151/info: appending metadata at 1732390760650 (+4 ms)Flushing bd1ea0efe9f4783ac621c9c9a4216151/info: closing flushed file at 1732390760650Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f58256d: reopening flushed file at 1732390760660 (+10 ms)Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for bd1ea0efe9f4783ac621c9c9a4216151 in 22ms, sequenceid=127, compaction requested=true at 1732390760667 (+7 ms)Writing region close event to WAL at 1732390760685 (+18 ms)Running coprocessor post-close hooks at 1732390760690 (+5 ms)Closed at 1732390760690 2024-11-23T19:39:20,693 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:20,693 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=bd1ea0efe9f4783ac621c9c9a4216151, regionState=CLOSED 2024-11-23T19:39:20,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure bd1ea0efe9f4783ac621c9c9a4216151, server=387b213c044a,41609,1732390744769 because future has completed 2024-11-23T19:39:20,698 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-23T19:39:20,698 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure bd1ea0efe9f4783ac621c9c9a4216151, server=387b213c044a,41609,1732390744769 in 211 msec 2024-11-23T19:39:20,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-23T19:39:20,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bd1ea0efe9f4783ac621c9c9a4216151, UNASSIGN in 219 msec 2024-11-23T19:39:20,711 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:20,715 INFO [PEWorker-5 
{}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=bd1ea0efe9f4783ac621c9c9a4216151, threads=3 2024-11-23T19:39:20,718 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/5c17896b20864efba2cbd43b3dc79c31 for region: bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:20,718 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/bf8cf6dd8935491eba3d0449a6964ae2 for region: bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:20,718 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/fdc06ef66c764111836cf42ffc20cb56 for region: bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:20,729 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/bf8cf6dd8935491eba3d0449a6964ae2, top=true 2024-11-23T19:39:20,729 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/fdc06ef66c764111836cf42ffc20cb56, top=true 2024-11-23T19:39:20,734 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/TestLogRolling-testLogRolling=bd1ea0efe9f4783ac621c9c9a4216151-fdc06ef66c764111836cf42ffc20cb56 for child: 26adb307ab68e8fcb9782771920a2811, parent: bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:20,734 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/fdc06ef66c764111836cf42ffc20cb56 for region: bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:20,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741849_1025 (size=27) 2024-11-23T19:39:20,735 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/TestLogRolling-testLogRolling=bd1ea0efe9f4783ac621c9c9a4216151-bf8cf6dd8935491eba3d0449a6964ae2 for child: 26adb307ab68e8fcb9782771920a2811, parent: bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:20,735 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting 
complete for store file: hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/bf8cf6dd8935491eba3d0449a6964ae2 for region: bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:20,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741849_1025 (size=27) 2024-11-23T19:39:20,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741850_1026 (size=27) 2024-11-23T19:39:20,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741850_1026 (size=27) 2024-11-23T19:39:20,746 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/5c17896b20864efba2cbd43b3dc79c31 for region: bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:20,748 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region bd1ea0efe9f4783ac621c9c9a4216151 Daughter A: [hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/d94ef5b652f6001b9acee3e178b4f374/info/5c17896b20864efba2cbd43b3dc79c31.bd1ea0efe9f4783ac621c9c9a4216151] storefiles, Daughter B: [hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/5c17896b20864efba2cbd43b3dc79c31.bd1ea0efe9f4783ac621c9c9a4216151, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/TestLogRolling-testLogRolling=bd1ea0efe9f4783ac621c9c9a4216151-bf8cf6dd8935491eba3d0449a6964ae2, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/TestLogRolling-testLogRolling=bd1ea0efe9f4783ac621c9c9a4216151-fdc06ef66c764111836cf42ffc20cb56] storefiles. 
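The split handling logged above treats the three store files in two ways: the two files whose keys lie entirely on the upper side of the split row ("top=true") become link files in daughter B only, while the file that straddles the split row is referenced by both daughters, as a "-bottom" half for daughter A and a "-top" half for daughter B. A rough sketch of that classification, with string keys and hypothetical names standing in for the real HFile metadata handled by SplitTableRegionProcedure:

final class SplitClassifierSketch {
    enum Placement { DAUGHTER_A_ONLY, DAUGHTER_B_ONLY, REFERENCE_IN_BOTH }

    // firstKey/lastKey stand in for the first and last row keys stored in a store file.
    static Placement classify(String firstKey, String lastKey, String splitRow) {
        if (lastKey.compareTo(splitRow) < 0) {
            return Placement.DAUGHTER_A_ONLY;      // entirely below the split row
        }
        if (firstKey.compareTo(splitRow) >= 0) {
            return Placement.DAUGHTER_B_ONLY;      // entirely at/above the split row ("top=true" link file)
        }
        return Placement.REFERENCE_IN_BOTH;        // straddles the split row: bottom + top references
    }

    public static void main(String[] args) {
        String splitRow = "row0062";
        System.out.println(classify("row0001", "row0100", splitRow)); // REFERENCE_IN_BOTH
        System.out.println(classify("row0062", "row0090", splitRow)); // DAUGHTER_B_ONLY
        System.out.println(classify("row0001", "row0050", splitRow)); // DAUGHTER_A_ONLY
    }
}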
2024-11-23T19:39:20,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741851_1027 (size=71) 2024-11-23T19:39:20,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741851_1027 (size=71) 2024-11-23T19:39:20,757 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:20,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741852_1028 (size=71) 2024-11-23T19:39:20,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741852_1028 (size=71) 2024-11-23T19:39:20,769 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:20,778 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/d94ef5b652f6001b9acee3e178b4f374/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-23T19:39:20,781 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-23T19:39:20,783 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732390760783"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732390760783"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732390760783"}]},"ts":"1732390760783"} 2024-11-23T19:39:20,783 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732390760783"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732390760783"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732390760783"}]},"ts":"1732390760783"} 2024-11-23T19:39:20,784 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732390760783"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732390760783"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732390760783"}]},"ts":"1732390760783"} 2024-11-23T19:39:20,800 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d94ef5b652f6001b9acee3e178b4f374, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=26adb307ab68e8fcb9782771920a2811, ASSIGN}] 2024-11-23T19:39:20,801 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d94ef5b652f6001b9acee3e178b4f374, ASSIGN 2024-11-23T19:39:20,801 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=26adb307ab68e8fcb9782771920a2811, ASSIGN 2024-11-23T19:39:20,802 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d94ef5b652f6001b9acee3e178b4f374, ASSIGN; state=SPLITTING_NEW, location=387b213c044a,41609,1732390744769; forceNewPlan=false, retain=false 2024-11-23T19:39:20,802 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=26adb307ab68e8fcb9782771920a2811, ASSIGN; state=SPLITTING_NEW, location=387b213c044a,41609,1732390744769; forceNewPlan=false, retain=false 2024-11-23T19:39:20,953 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=d94ef5b652f6001b9acee3e178b4f374, regionState=OPENING, regionLocation=387b213c044a,41609,1732390744769 2024-11-23T19:39:20,953 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=26adb307ab68e8fcb9782771920a2811, regionState=OPENING, regionLocation=387b213c044a,41609,1732390744769 2024-11-23T19:39:20,955 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=26adb307ab68e8fcb9782771920a2811, ASSIGN because future has completed 2024-11-23T19:39:20,955 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 26adb307ab68e8fcb9782771920a2811, server=387b213c044a,41609,1732390744769}] 2024-11-23T19:39:20,956 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d94ef5b652f6001b9acee3e178b4f374, ASSIGN because future has completed 2024-11-23T19:39:20,957 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure d94ef5b652f6001b9acee3e178b4f374, server=387b213c044a,41609,1732390744769}] 2024-11-23T19:39:21,116 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374. 
2024-11-23T19:39:21,116 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => d94ef5b652f6001b9acee3e178b4f374, NAME => 'TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-23T19:39:21,116 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling d94ef5b652f6001b9acee3e178b4f374 2024-11-23T19:39:21,117 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:39:21,117 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for d94ef5b652f6001b9acee3e178b4f374 2024-11-23T19:39:21,117 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for d94ef5b652f6001b9acee3e178b4f374 2024-11-23T19:39:21,118 INFO [StoreOpener-d94ef5b652f6001b9acee3e178b4f374-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d94ef5b652f6001b9acee3e178b4f374 2024-11-23T19:39:21,120 INFO [StoreOpener-d94ef5b652f6001b9acee3e178b4f374-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d94ef5b652f6001b9acee3e178b4f374 columnFamilyName info 2024-11-23T19:39:21,120 DEBUG [StoreOpener-d94ef5b652f6001b9acee3e178b4f374-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:21,136 DEBUG [StoreOpener-d94ef5b652f6001b9acee3e178b4f374-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/d94ef5b652f6001b9acee3e178b4f374/info/5c17896b20864efba2cbd43b3dc79c31.bd1ea0efe9f4783ac621c9c9a4216151->hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/5c17896b20864efba2cbd43b3dc79c31-bottom 2024-11-23T19:39:21,136 INFO [StoreOpener-d94ef5b652f6001b9acee3e178b4f374-1 {}] regionserver.HStore(327): Store=d94ef5b652f6001b9acee3e178b4f374/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:39:21,136 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for d94ef5b652f6001b9acee3e178b4f374 2024-11-23T19:39:21,137 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/d94ef5b652f6001b9acee3e178b4f374 2024-11-23T19:39:21,138 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/d94ef5b652f6001b9acee3e178b4f374 2024-11-23T19:39:21,138 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for d94ef5b652f6001b9acee3e178b4f374 2024-11-23T19:39:21,138 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for d94ef5b652f6001b9acee3e178b4f374 2024-11-23T19:39:21,140 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for d94ef5b652f6001b9acee3e178b4f374 2024-11-23T19:39:21,141 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened d94ef5b652f6001b9acee3e178b4f374; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=692747, jitterRate=-0.11912786960601807}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T19:39:21,141 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d94ef5b652f6001b9acee3e178b4f374 2024-11-23T19:39:21,141 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for d94ef5b652f6001b9acee3e178b4f374: Running coprocessor pre-open hook at 1732390761117Writing region info on filesystem at 1732390761117Initializing all the Stores at 1732390761118 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390761118Cleaning up temporary data from old regions at 1732390761138 (+20 ms)Running coprocessor post-open hooks at 1732390761141 (+3 ms)Region opened successfully at 1732390761141 2024-11-23T19:39:21,142 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374., pid=13, masterSystemTime=1732390761108 2024-11-23T19:39:21,142 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 
d94ef5b652f6001b9acee3e178b4f374:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T19:39:21,142 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-23T19:39:21,142 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:21,143 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374. 2024-11-23T19:39:21,143 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1541): d94ef5b652f6001b9acee3e178b4f374/info is initiating minor compaction (all files) 2024-11-23T19:39:21,143 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d94ef5b652f6001b9acee3e178b4f374/info in TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374. 2024-11-23T19:39:21,143 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/d94ef5b652f6001b9acee3e178b4f374/info/5c17896b20864efba2cbd43b3dc79c31.bd1ea0efe9f4783ac621c9c9a4216151->hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/5c17896b20864efba2cbd43b3dc79c31-bottom] into tmpdir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/d94ef5b652f6001b9acee3e178b4f374/.tmp, totalSize=86.3 K 2024-11-23T19:39:21,144 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5c17896b20864efba2cbd43b3dc79c31.bd1ea0efe9f4783ac621c9c9a4216151, keycount=38, bloomtype=ROW, size=86.3 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1732390756233 2024-11-23T19:39:21,144 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374. 2024-11-23T19:39:21,144 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374. 2024-11-23T19:39:21,144 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811. 
2024-11-23T19:39:21,145 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 26adb307ab68e8fcb9782771920a2811, NAME => 'TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-23T19:39:21,145 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:21,145 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=d94ef5b652f6001b9acee3e178b4f374, regionState=OPEN, openSeqNum=131, regionLocation=387b213c044a,41609,1732390744769 2024-11-23T19:39:21,145 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:39:21,145 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:21,145 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:21,146 INFO [StoreOpener-26adb307ab68e8fcb9782771920a2811-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:21,147 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-23T19:39:21,147 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
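The "Since none of the CFs were above the size, flushing all." line reflects a simple selection rule: flush only the column families whose memstore exceeds a lower bound, and fall back to flushing every family when none qualifies. A small illustrative sketch of that rule (hypothetical family names, sizes, and threshold; not the FlushAllLargeStoresPolicy class itself):

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class FlushPolicySketch {
    // Returns the column families to flush: those above lowerBound bytes,
    // or all of them when none is above it (which is the case logged above).
    static Set<String> familiesToFlush(Map<String, Long> memstoreSizes, long lowerBound) {
        Set<String> selected = new HashSet<>();
        for (Map.Entry<String, Long> e : memstoreSizes.entrySet()) {
            if (e.getValue() > lowerBound) {
                selected.add(e.getKey());
            }
        }
        return selected.isEmpty() ? memstoreSizes.keySet() : selected;
    }

    public static void main(String[] args) {
        Map<String, Long> sizes = Map.of("info", 4_900L, "ns", 74L, "table", 122L, "rep_barrier", 0L);
        System.out.println(familiesToFlush(sizes, 16 * 1024L)); // none above 16 KB: flush all four families
    }
}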
2024-11-23T19:39:21,147 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-23T19:39:21,147 INFO [StoreOpener-26adb307ab68e8fcb9782771920a2811-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 26adb307ab68e8fcb9782771920a2811 columnFamilyName info 2024-11-23T19:39:21,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure d94ef5b652f6001b9acee3e178b4f374, server=387b213c044a,41609,1732390744769 because future has completed 2024-11-23T19:39:21,147 DEBUG [StoreOpener-26adb307ab68e8fcb9782771920a2811-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:21,151 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-23T19:39:21,151 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure d94ef5b652f6001b9acee3e178b4f374, server=387b213c044a,41609,1732390744769 in 191 msec 2024-11-23T19:39:21,153 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d94ef5b652f6001b9acee3e178b4f374, ASSIGN in 351 msec 2024-11-23T19:39:21,161 DEBUG [StoreOpener-26adb307ab68e8fcb9782771920a2811-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/5c17896b20864efba2cbd43b3dc79c31.bd1ea0efe9f4783ac621c9c9a4216151->hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/5c17896b20864efba2cbd43b3dc79c31-top 2024-11-23T19:39:21,165 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d94ef5b652f6001b9acee3e178b4f374#info#compaction#68 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T19:39:21,165 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/d94ef5b652f6001b9acee3e178b4f374/.tmp/info/aff6fd7464cb470fbf1a22a93ee99b80 is 1080, key is row0001/info:/1732390756233/Put/seqid=0 2024-11-23T19:39:21,166 DEBUG [StoreOpener-26adb307ab68e8fcb9782771920a2811-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/TestLogRolling-testLogRolling=bd1ea0efe9f4783ac621c9c9a4216151-bf8cf6dd8935491eba3d0449a6964ae2 2024-11-23T19:39:21,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741853_1029 (size=70862) 2024-11-23T19:39:21,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741853_1029 (size=70862) 2024-11-23T19:39:21,171 DEBUG [StoreOpener-26adb307ab68e8fcb9782771920a2811-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/TestLogRolling-testLogRolling=bd1ea0efe9f4783ac621c9c9a4216151-fdc06ef66c764111836cf42ffc20cb56 2024-11-23T19:39:21,171 INFO [StoreOpener-26adb307ab68e8fcb9782771920a2811-1 {}] regionserver.HStore(327): Store=26adb307ab68e8fcb9782771920a2811/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:39:21,172 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:21,172 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:21,173 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:21,174 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:21,174 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:21,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/.tmp/info/7903f5675e15485e97093543ca1927f3 is 193, key is TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811./info:regioninfo/1732390760953/Put/seqid=0 
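The "Len of the biggest cell ... key is row0001/info:/1732390756233/Put/seqid=0" lines print the cell key as row/family:qualifier/timestamp/type plus the sequence id; the qualifier here is empty, which is why "info:" is immediately followed by "/". A tiny formatting sketch that just reproduces this notation (hypothetical values):

final class CellKeySketch {
    // Reproduces the row/family:qualifier/timestamp/Type/seqid notation used in the lines above.
    static String describe(String row, String family, String qualifier, long ts, String type, long seqId) {
        return row + "/" + family + ":" + qualifier + "/" + ts + "/" + type + "/seqid=" + seqId;
    }

    public static void main(String[] args) {
        System.out.println(describe("row0001", "info", "", 1732390756233L, "Put", 0L));
        // prints row0001/info:/1732390756233/Put/seqid=0
    }
}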
2024-11-23T19:39:21,176 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:21,176 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/d94ef5b652f6001b9acee3e178b4f374/.tmp/info/aff6fd7464cb470fbf1a22a93ee99b80 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/d94ef5b652f6001b9acee3e178b4f374/info/aff6fd7464cb470fbf1a22a93ee99b80 2024-11-23T19:39:21,177 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 26adb307ab68e8fcb9782771920a2811; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=783710, jitterRate=-0.0034619569778442383}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T19:39:21,177 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:21,177 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 26adb307ab68e8fcb9782771920a2811: Running coprocessor pre-open hook at 1732390761145Writing region info on filesystem at 1732390761145Initializing all the Stores at 1732390761146 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390761146Cleaning up temporary data from old regions at 1732390761174 (+28 ms)Running coprocessor post-open hooks at 1732390761177 (+3 ms)Region opened successfully at 1732390761177 2024-11-23T19:39:21,178 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811., pid=12, masterSystemTime=1732390761108 2024-11-23T19:39:21,178 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 26adb307ab68e8fcb9782771920a2811:info, priority=-2147483648, current under compaction store size is 2 2024-11-23T19:39:21,178 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:21,178 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T19:39:21,179 INFO [RS:0;387b213c044a:41609-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region 
TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811. 2024-11-23T19:39:21,179 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] regionserver.HStore(1541): 26adb307ab68e8fcb9782771920a2811/info is initiating minor compaction (all files) 2024-11-23T19:39:21,180 INFO [RS:0;387b213c044a:41609-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 26adb307ab68e8fcb9782771920a2811/info in TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811. 2024-11-23T19:39:21,180 INFO [RS:0;387b213c044a:41609-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/5c17896b20864efba2cbd43b3dc79c31.bd1ea0efe9f4783ac621c9c9a4216151->hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/5c17896b20864efba2cbd43b3dc79c31-top, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/TestLogRolling-testLogRolling=bd1ea0efe9f4783ac621c9c9a4216151-fdc06ef66c764111836cf42ffc20cb56, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/TestLogRolling-testLogRolling=bd1ea0efe9f4783ac621c9c9a4216151-bf8cf6dd8935491eba3d0449a6964ae2] into tmpdir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp, totalSize=116.0 K 2024-11-23T19:39:21,181 DEBUG [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811. 2024-11-23T19:39:21,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741854_1030 (size=9847) 2024-11-23T19:39:21,181 INFO [RS_OPEN_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811. 
2024-11-23T19:39:21,181 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] compactions.Compactor(225): Compacting 5c17896b20864efba2cbd43b3dc79c31.bd1ea0efe9f4783ac621c9c9a4216151, keycount=38, bloomtype=ROW, size=86.3 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1732390756233 2024-11-23T19:39:21,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741854_1030 (size=9847) 2024-11-23T19:39:21,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/.tmp/info/7903f5675e15485e97093543ca1927f3 2024-11-23T19:39:21,182 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=26adb307ab68e8fcb9782771920a2811, regionState=OPEN, openSeqNum=131, regionLocation=387b213c044a,41609,1732390744769 2024-11-23T19:39:21,183 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=bd1ea0efe9f4783ac621c9c9a4216151-fdc06ef66c764111836cf42ffc20cb56, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732390760410 2024-11-23T19:39:21,184 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=bd1ea0efe9f4783ac621c9c9a4216151-bf8cf6dd8935491eba3d0449a6964ae2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732390760434 2024-11-23T19:39:21,184 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in d94ef5b652f6001b9acee3e178b4f374/info of d94ef5b652f6001b9acee3e178b4f374 into aff6fd7464cb470fbf1a22a93ee99b80(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
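The CompactionConfiguration lines above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000) describe size-ratio based minor-compaction selection: a file only joins a selection if it is not disproportionately larger than the files chosen alongside it. A simplified sketch under the assumption that the test is "size(f) <= ratio x total size of the other candidates" (an approximation, not the actual ExploringCompactionPolicy code):

import java.util.List;

final class CompactionRatioSketch {
    // True when every candidate file satisfies size(f) <= ratio * (total size of the other candidates).
    static boolean selectionWithinRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        double ratio = 1.2;
        System.out.println(selectionWithinRatio(List.of(88_000L, 18_000L, 12_000L), ratio)); // false: one file dwarfs the rest
        System.out.println(selectionWithinRatio(List.of(18_000L, 15_000L, 12_000L), ratio)); // true: similarly sized files
    }
}

Note that the compactions of the two daughter regions in this log bypass normal selection anyway ("Keeping/Overriding Compaction request priority ... recently split daughter region"), because all reference files left behind by the split have to be rewritten into real store files.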
2024-11-23T19:39:21,184 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d94ef5b652f6001b9acee3e178b4f374: 2024-11-23T19:39:21,184 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374., storeName=d94ef5b652f6001b9acee3e178b4f374/info, priority=15, startTime=1732390761142; duration=0sec 2024-11-23T19:39:21,184 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:21,184 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d94ef5b652f6001b9acee3e178b4f374:info 2024-11-23T19:39:21,184 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 26adb307ab68e8fcb9782771920a2811, server=387b213c044a,41609,1732390744769 because future has completed 2024-11-23T19:39:21,188 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-23T19:39:21,188 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 26adb307ab68e8fcb9782771920a2811, server=387b213c044a,41609,1732390744769 in 230 msec 2024-11-23T19:39:21,191 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-23T19:39:21,191 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=26adb307ab68e8fcb9782771920a2811, ASSIGN in 388 msec 2024-11-23T19:39:21,194 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=bd1ea0efe9f4783ac621c9c9a4216151, daughterA=d94ef5b652f6001b9acee3e178b4f374, daughterB=26adb307ab68e8fcb9782771920a2811 in 722 msec 2024-11-23T19:39:21,207 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/.tmp/ns/75f40ff2aa5f4f6badc29428faff518a is 43, key is default/ns:d/1732390746115/Put/seqid=0 2024-11-23T19:39:21,208 INFO [RS:0;387b213c044a:41609-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26adb307ab68e8fcb9782771920a2811#info#compaction#70 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T19:39:21,208 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/88266589d4544d87be74f5c71b1093c0 is 1080, key is row0062/info:/1732390758369/Put/seqid=0 2024-11-23T19:39:21,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741855_1031 (size=5153) 2024-11-23T19:39:21,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741855_1031 (size=5153) 2024-11-23T19:39:21,214 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/.tmp/ns/75f40ff2aa5f4f6badc29428faff518a 2024-11-23T19:39:21,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741856_1032 (size=42984) 2024-11-23T19:39:21,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741856_1032 (size=42984) 2024-11-23T19:39:21,228 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/88266589d4544d87be74f5c71b1093c0 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/88266589d4544d87be74f5c71b1093c0 2024-11-23T19:39:21,234 INFO [RS:0;387b213c044a:41609-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 26adb307ab68e8fcb9782771920a2811/info of 26adb307ab68e8fcb9782771920a2811 into 88266589d4544d87be74f5c71b1093c0(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
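The flush and compaction outputs above are first written under a store's .tmp directory and then "Committed" by renaming them into the store directory, so readers never observe a partially written HFile. A bare-bones sketch of that write-then-rename pattern using the ordinary Hadoop FileSystem API (paths and file names are hypothetical; the real commit goes through HRegionFileSystem):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class TmpCommitSketch {
    // Write-then-rename: the file only appears under the store directory once it is complete,
    // which is what the "Committing .tmp/... as ..." lines above record.
    static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws Exception {
        Path target = new Path(storeDir, tmpFile.getName());
        if (!fs.rename(tmpFile, target)) {
            throw new java.io.IOException("failed to commit " + tmpFile + " to " + target);
        }
        return target;
    }

    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());     // local filesystem unless configured otherwise
        Path tmp = new Path("/tmp/store-sketch/.tmp/filea");      // hypothetical flush/compaction output
        Path store = new Path("/tmp/store-sketch/info");          // hypothetical store directory
        fs.mkdirs(tmp.getParent());
        fs.mkdirs(store);
        fs.create(tmp).close();                                   // stand-in for writing the HFile
        System.out.println(commit(fs, tmp, store));
    }
}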
2024-11-23T19:39:21,234 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:21,234 INFO [RS:0;387b213c044a:41609-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811., storeName=26adb307ab68e8fcb9782771920a2811/info, priority=13, startTime=1732390761178; duration=0sec 2024-11-23T19:39:21,234 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:21,234 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26adb307ab68e8fcb9782771920a2811:info 2024-11-23T19:39:21,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/.tmp/table/7f8c1eb03b9d4e69b4316d60e4e9aabd is 65, key is TestLogRolling-testLogRolling/table:state/1732390746587/Put/seqid=0 2024-11-23T19:39:21,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741857_1033 (size=5340) 2024-11-23T19:39:21,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741857_1033 (size=5340) 2024-11-23T19:39:21,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/.tmp/table/7f8c1eb03b9d4e69b4316d60e4e9aabd 2024-11-23T19:39:21,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/.tmp/info/7903f5675e15485e97093543ca1927f3 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/info/7903f5675e15485e97093543ca1927f3 2024-11-23T19:39:21,250 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/info/7903f5675e15485e97093543ca1927f3, entries=30, sequenceid=17, filesize=9.6 K 2024-11-23T19:39:21,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/.tmp/ns/75f40ff2aa5f4f6badc29428faff518a as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/ns/75f40ff2aa5f4f6badc29428faff518a 2024-11-23T19:39:21,256 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/ns/75f40ff2aa5f4f6badc29428faff518a, entries=2, sequenceid=17, filesize=5.0 K 2024-11-23T19:39:21,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/.tmp/table/7f8c1eb03b9d4e69b4316d60e4e9aabd as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/table/7f8c1eb03b9d4e69b4316d60e4e9aabd 2024-11-23T19:39:21,263 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/table/7f8c1eb03b9d4e69b4316d60e4e9aabd, entries=2, sequenceid=17, filesize=5.2 K 2024-11-23T19:39:21,264 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 116ms, sequenceid=17, compaction requested=false 2024-11-23T19:39:21,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-23T19:39:21,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:39:21,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:22,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:38260 deadline: 1732390772452, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151. is not online on 387b213c044a,41609,1732390744769 2024-11-23T19:39:22,481 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151., hostname=387b213c044a,41609,1732390744769, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151., hostname=387b213c044a,41609,1732390744769, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151. 
is not online on 387b213c044a,41609,1732390744769 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T19:39:22,482 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151., hostname=387b213c044a,41609,1732390744769, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151. is not online on 387b213c044a,41609,1732390744769 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T19:39:22,482 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732390746216.bd1ea0efe9f4783ac621c9c9a4216151., hostname=387b213c044a,41609,1732390744769, seqNum=2 from cache 2024-11-23T19:39:22,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:22,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:39:23,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:23,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:24,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:39:24,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:25,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:25,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:25,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:25,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:25,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:25,692 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:25,692 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:25,692 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:25,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:25,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:25,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:25,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:25,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:25,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:25,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:25,718 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:25,718 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:25,718 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:25,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:26,227 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-23T19:39:26,228 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:26,229 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:26,229 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:26,229 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:26,229 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:26,229 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:26,230 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:26,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:26,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:26,264 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:26,264 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:26,264 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:26,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:26,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:26,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:26,272 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:26,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:26,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:26,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:27,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:27,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:28,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:28,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:29,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:29,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:30,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:30,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:31,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:31,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:32,504 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811., hostname=387b213c044a,41609,1732390744769, seqNum=131] 2024-11-23T19:39:32,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:32,519 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 26adb307ab68e8fcb9782771920a2811 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T19:39:32,526 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/fb186d23b75648dfaa099f6e3cc2e6a5 is 1080, key is row0097/info:/1732390772505/Put/seqid=0 2024-11-23T19:39:32,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741858_1034 (size=12516) 2024-11-23T19:39:32,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741858_1034 (size=12516) 2024-11-23T19:39:32,538 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/fb186d23b75648dfaa099f6e3cc2e6a5 2024-11-23T19:39:32,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/fb186d23b75648dfaa099f6e3cc2e6a5 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/fb186d23b75648dfaa099f6e3cc2e6a5 2024-11-23T19:39:32,556 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/fb186d23b75648dfaa099f6e3cc2e6a5, entries=7, sequenceid=141, filesize=12.2 K 2024-11-23T19:39:32,557 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for 26adb307ab68e8fcb9782771920a2811 in 38ms, sequenceid=141, compaction requested=false 2024-11-23T19:39:32,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 26adb307ab68e8fcb9782771920a2811: 
2024-11-23T19:39:32,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:32,558 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 26adb307ab68e8fcb9782771920a2811 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-23T19:39:32,562 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/5875f90489fd48a58dbfaebed3d33318 is 1080, key is row0104/info:/1732390772520/Put/seqid=0 2024-11-23T19:39:32,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741859_1035 (size=22238) 2024-11-23T19:39:32,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741859_1035 (size=22238) 2024-11-23T19:39:32,567 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/5875f90489fd48a58dbfaebed3d33318 2024-11-23T19:39:32,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/5875f90489fd48a58dbfaebed3d33318 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/5875f90489fd48a58dbfaebed3d33318 2024-11-23T19:39:32,577 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/5875f90489fd48a58dbfaebed3d33318, entries=16, sequenceid=160, filesize=21.7 K 2024-11-23T19:39:32,578 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=9.46 KB/9684 for 26adb307ab68e8fcb9782771920a2811 in 20ms, sequenceid=160, compaction requested=true 2024-11-23T19:39:32,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:32,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26adb307ab68e8fcb9782771920a2811:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T19:39:32,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:32,578 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T19:39:32,579 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 77738 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-23T19:39:32,579 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1541): 26adb307ab68e8fcb9782771920a2811/info is initiating minor compaction (all files) 2024-11-23T19:39:32,579 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 26adb307ab68e8fcb9782771920a2811/info in TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811. 2024-11-23T19:39:32,579 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/88266589d4544d87be74f5c71b1093c0, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/fb186d23b75648dfaa099f6e3cc2e6a5, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/5875f90489fd48a58dbfaebed3d33318] into tmpdir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp, totalSize=75.9 K 2024-11-23T19:39:32,580 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 88266589d4544d87be74f5c71b1093c0, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732390758369 2024-11-23T19:39:32,580 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting fb186d23b75648dfaa099f6e3cc2e6a5, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1732390772505 2024-11-23T19:39:32,580 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5875f90489fd48a58dbfaebed3d33318, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732390772520 2024-11-23T19:39:32,592 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26adb307ab68e8fcb9782771920a2811#info#compaction#75 average throughput is 29.76 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T19:39:32,593 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/81c6d1c4b23d48d0a419dff52a3ff838 is 1080, key is row0062/info:/1732390758369/Put/seqid=0 2024-11-23T19:39:32,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741860_1036 (size=67948) 2024-11-23T19:39:32,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741860_1036 (size=67948) 2024-11-23T19:39:32,603 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/81c6d1c4b23d48d0a419dff52a3ff838 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/81c6d1c4b23d48d0a419dff52a3ff838 2024-11-23T19:39:32,608 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 26adb307ab68e8fcb9782771920a2811/info of 26adb307ab68e8fcb9782771920a2811 into 81c6d1c4b23d48d0a419dff52a3ff838(size=66.4 K), total size for store is 66.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T19:39:32,608 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:32,608 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811., storeName=26adb307ab68e8fcb9782771920a2811/info, priority=13, startTime=1732390772578; duration=0sec 2024-11-23T19:39:32,608 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:32,608 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26adb307ab68e8fcb9782771920a2811:info 2024-11-23T19:39:32,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:32,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:33,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:33,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:34,516 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
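The DEBUG line just above ("NoSuchFieldException: threadGroup ... See HBASE-27595 for details") is another reflection lookup that is expected to fail on newer Hadoop: the test fixer reaches for a field that Hadoop releases after 3.2.3 / 3.3.4 no longer declare, and simply logs the miss. A hedged sketch of that lookup pattern is shown below with illustrative names; it is not the HBaseTestingUtil implementation.

```java
import java.lang.reflect.Field;

// Illustrative sketch only: a reflective field lookup of the kind behind the
// "NoSuchFieldException: threadGroup" DEBUG line above. On Hadoop releases
// newer than 3.2.3 / 3.3.4 the field no longer exists, so the exception is
// expected and merely logged (see HBASE-27595).
final class ThreadGroupFieldLookup {

  /** Returns the accessible field, or null when this Hadoop version lacks it. */
  static Field findThreadGroupField(Class<?> asyncDiskServiceClass) {
    try {
      Field threadGroup = asyncDiskServiceClass.getDeclaredField("threadGroup");
      threadGroup.setAccessible(true);
      return threadGroup;
    } catch (NoSuchFieldException e) {
      return null; // expected on newer Hadoop; nothing for the fixer to adjust
    }
  }

  private ThreadGroupFieldLookup() {
  }
}
```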
2024-11-23T19:39:34,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:34,578 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 26adb307ab68e8fcb9782771920a2811 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-23T19:39:34,584 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/448d9e49321d4cd591afe9663e6bfa4b is 1080, key is row0120/info:/1732390772559/Put/seqid=0 2024-11-23T19:39:34,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741861_1037 (size=15750) 2024-11-23T19:39:34,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741861_1037 (size=15750) 2024-11-23T19:39:34,590 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/448d9e49321d4cd591afe9663e6bfa4b 2024-11-23T19:39:34,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/448d9e49321d4cd591afe9663e6bfa4b as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/448d9e49321d4cd591afe9663e6bfa4b 2024-11-23T19:39:34,600 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/448d9e49321d4cd591afe9663e6bfa4b, entries=10, sequenceid=174, filesize=15.4 K 2024-11-23T19:39:34,601 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=11.56 KB/11836 for 26adb307ab68e8fcb9782771920a2811 in 23ms, sequenceid=174, compaction requested=false 2024-11-23T19:39:34,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:34,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:34,602 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 26adb307ab68e8fcb9782771920a2811 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-23T19:39:34,606 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/7a979eefec4848d993eca22646923841 is 1080, key is row0130/info:/1732390774579/Put/seqid=0 2024-11-23T19:39:34,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to 
blk_1073741862_1038 (size=17906) 2024-11-23T19:39:34,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741862_1038 (size=17906) 2024-11-23T19:39:34,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=189 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/7a979eefec4848d993eca22646923841 2024-11-23T19:39:34,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/7a979eefec4848d993eca22646923841 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/7a979eefec4848d993eca22646923841 2024-11-23T19:39:34,622 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/7a979eefec4848d993eca22646923841, entries=12, sequenceid=189, filesize=17.5 K 2024-11-23T19:39:34,623 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 26adb307ab68e8fcb9782771920a2811 in 21ms, sequenceid=189, compaction requested=true 2024-11-23T19:39:34,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:34,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26adb307ab68e8fcb9782771920a2811:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T19:39:34,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:34,623 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T19:39:34,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:34,624 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 26adb307ab68e8fcb9782771920a2811 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-23T19:39:34,624 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101604 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T19:39:34,624 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1541): 26adb307ab68e8fcb9782771920a2811/info is initiating minor compaction (all files) 2024-11-23T19:39:34,624 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 26adb307ab68e8fcb9782771920a2811/info in TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811. 
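The compaction-selection DEBUG lines above ("Exploring compaction algorithm has selected 3 files ... with 1 in ratio") refer to a ratio test over the candidate store files: a set qualifies when no single file is larger than ratio * (sum of the other files). The sketch below illustrates that test under assumed names (CompactionRatioCheck, filesInRatio) and a commonly used ratio of 1.2; the real policy applies further guards (minimum/maximum file counts and sizes) that are omitted here.

```java
import java.util.Arrays;
import java.util.List;

// Illustrative sketch only: the "in ratio" test that the compaction-selection
// DEBUG lines above count. A candidate set passes when every file is at most
// ratio * (sum of the other files); dominant outliers are left for a larger
// compaction later.
final class CompactionRatioCheck {

  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    if (fileSizes.size() < 2) {
      return true; // a single file is trivially in ratio
    }
    long total = 0L;
    for (long size : fileSizes) {
      total += size;
    }
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Three similarly sized flush files pass with the commonly used ratio 1.2.
    System.out.println(filesInRatio(Arrays.asList(40_000L, 35_000L, 30_000L), 1.2));
    // One dominant file fails the test and would not be grouped with the others.
    System.out.println(filesInRatio(Arrays.asList(400_000L, 35_000L, 30_000L), 1.2));
  }
}
```

The "Completed compaction of 3 (all) file(s)" lines that follow show the outcome of each selection: the chosen inputs are rewritten into a single larger HFile and the under-compaction mark on the store is cleared.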
2024-11-23T19:39:34,625 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/81c6d1c4b23d48d0a419dff52a3ff838, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/448d9e49321d4cd591afe9663e6bfa4b, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/7a979eefec4848d993eca22646923841] into tmpdir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp, totalSize=99.2 K 2024-11-23T19:39:34,625 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 81c6d1c4b23d48d0a419dff52a3ff838, keycount=58, bloomtype=ROW, size=66.4 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732390758369 2024-11-23T19:39:34,626 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 448d9e49321d4cd591afe9663e6bfa4b, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732390772559 2024-11-23T19:39:34,626 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7a979eefec4848d993eca22646923841, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1732390774579 2024-11-23T19:39:34,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:34,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:39:34,629 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/63acbd560ca14311a30a2b16fe8f5175 is 1080, key is row0142/info:/1732390774603/Put/seqid=0 2024-11-23T19:39:34,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741863_1039 (size=16828) 2024-11-23T19:39:34,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741863_1039 (size=16828) 2024-11-23T19:39:34,639 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/63acbd560ca14311a30a2b16fe8f5175 2024-11-23T19:39:34,644 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26adb307ab68e8fcb9782771920a2811#info#compaction#79 average throughput is 27.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T19:39:34,644 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/aadb53d088dc4814b00b7031b7d0ecbf is 1080, key is row0062/info:/1732390758369/Put/seqid=0 2024-11-23T19:39:34,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/63acbd560ca14311a30a2b16fe8f5175 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/63acbd560ca14311a30a2b16fe8f5175 2024-11-23T19:39:34,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741864_1040 (size=91843) 2024-11-23T19:39:34,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741864_1040 (size=91843) 2024-11-23T19:39:34,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/63acbd560ca14311a30a2b16fe8f5175, entries=11, sequenceid=203, filesize=16.4 K 2024-11-23T19:39:34,652 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=8.41 KB/8608 for 26adb307ab68e8fcb9782771920a2811 in 28ms, sequenceid=203, compaction requested=false 2024-11-23T19:39:34,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:34,656 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/aadb53d088dc4814b00b7031b7d0ecbf as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/aadb53d088dc4814b00b7031b7d0ecbf 2024-11-23T19:39:34,663 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 26adb307ab68e8fcb9782771920a2811/info of 26adb307ab68e8fcb9782771920a2811 into aadb53d088dc4814b00b7031b7d0ecbf(size=89.7 K), total size for store is 106.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T19:39:34,663 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:34,663 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811., storeName=26adb307ab68e8fcb9782771920a2811/info, priority=13, startTime=1732390774623; duration=0sec 2024-11-23T19:39:34,663 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:34,663 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26adb307ab68e8fcb9782771920a2811:info 2024-11-23T19:39:35,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:35,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:36,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:36,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:36,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:36,648 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 26adb307ab68e8fcb9782771920a2811 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-23T19:39:36,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/d28093bd36ec4cf79ac1e02a27c44576 is 1080, key is row0153/info:/1732390774625/Put/seqid=0 2024-11-23T19:39:36,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741865_1041 (size=14672) 2024-11-23T19:39:36,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741865_1041 (size=14672) 2024-11-23T19:39:36,671 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/d28093bd36ec4cf79ac1e02a27c44576 2024-11-23T19:39:36,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/d28093bd36ec4cf79ac1e02a27c44576 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/d28093bd36ec4cf79ac1e02a27c44576 2024-11-23T19:39:36,683 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/d28093bd36ec4cf79ac1e02a27c44576, entries=9, sequenceid=216, filesize=14.3 K 2024-11-23T19:39:36,684 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=17.86 KB/18292 for 26adb307ab68e8fcb9782771920a2811 in 36ms, sequenceid=216, compaction requested=true 2024-11-23T19:39:36,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:36,685 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26adb307ab68e8fcb9782771920a2811:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T19:39:36,685 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:36,685 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T19:39:36,686 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 26adb307ab68e8fcb9782771920a2811 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-11-23T19:39:36,686 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 123343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T19:39:36,686 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1541): 26adb307ab68e8fcb9782771920a2811/info is initiating minor compaction (all files) 2024-11-23T19:39:36,686 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 26adb307ab68e8fcb9782771920a2811/info in TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811. 2024-11-23T19:39:36,687 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/aadb53d088dc4814b00b7031b7d0ecbf, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/63acbd560ca14311a30a2b16fe8f5175, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/d28093bd36ec4cf79ac1e02a27c44576] into tmpdir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp, totalSize=120.5 K 2024-11-23T19:39:36,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:36,687 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting aadb53d088dc4814b00b7031b7d0ecbf, keycount=80, bloomtype=ROW, size=89.7 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1732390758369 2024-11-23T19:39:36,688 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 63acbd560ca14311a30a2b16fe8f5175, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732390774603 2024-11-23T19:39:36,688 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting d28093bd36ec4cf79ac1e02a27c44576, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732390774625 2024-11-23T19:39:36,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/e59c79f35aa340b9a36696f651d7713f is 1080, key is row0162/info:/1732390776649/Put/seqid=0 2024-11-23T19:39:36,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is 
added to blk_1073741866_1042 (size=24394) 2024-11-23T19:39:36,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741866_1042 (size=24394) 2024-11-23T19:39:36,706 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/e59c79f35aa340b9a36696f651d7713f 2024-11-23T19:39:36,712 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26adb307ab68e8fcb9782771920a2811#info#compaction#82 average throughput is 34.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T19:39:36,712 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/7cd4a43419184b37a263ee20f1d8d272 is 1080, key is row0062/info:/1732390758369/Put/seqid=0 2024-11-23T19:39:36,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/e59c79f35aa340b9a36696f651d7713f as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/e59c79f35aa340b9a36696f651d7713f 2024-11-23T19:39:36,719 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/e59c79f35aa340b9a36696f651d7713f, entries=18, sequenceid=237, filesize=23.8 K 2024-11-23T19:39:36,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=26adb307ab68e8fcb9782771920a2811, server=387b213c044a,41609,1732390744769 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-23T19:39:36,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:38260 deadline: 1732390786719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=26adb307ab68e8fcb9782771920a2811, server=387b213c044a,41609,1732390744769 2024-11-23T19:39:36,720 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=11.56 KB/11836 for 26adb307ab68e8fcb9782771920a2811 in 34ms, sequenceid=237, compaction requested=false 2024-11-23T19:39:36,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:36,720 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811., hostname=387b213c044a,41609,1732390744769, seqNum=131 , the old value is region=TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811., hostname=387b213c044a,41609,1732390744769, seqNum=131, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=26adb307ab68e8fcb9782771920a2811, server=387b213c044a,41609,1732390744769 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T19:39:36,720 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811., hostname=387b213c044a,41609,1732390744769, seqNum=131 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=26adb307ab68e8fcb9782771920a2811, server=387b213c044a,41609,1732390744769 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at 
org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T19:39:36,720 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811., hostname=387b213c044a,41609,1732390744769, seqNum=131 because the exception is null or not the one we care about 2024-11-23T19:39:36,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741867_1043 (size=113509) 2024-11-23T19:39:36,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741867_1043 (size=113509) 2024-11-23T19:39:36,731 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/7cd4a43419184b37a263ee20f1d8d272 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/7cd4a43419184b37a263ee20f1d8d272 2024-11-23T19:39:36,737 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 26adb307ab68e8fcb9782771920a2811/info of 26adb307ab68e8fcb9782771920a2811 into 7cd4a43419184b37a263ee20f1d8d272(size=110.8 K), total size for store is 134.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T19:39:36,737 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:36,737 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811., storeName=26adb307ab68e8fcb9782771920a2811/info, priority=13, startTime=1732390776685; duration=0sec 2024-11-23T19:39:36,737 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:36,737 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26adb307ab68e8fcb9782771920a2811:info 2024-11-23T19:39:37,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:39:37,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:38,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:38,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:39:39,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:39,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:40,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:39:40,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:41,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:41,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:39:42,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:42,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:43,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:39:43,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:44,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:44,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:39:45,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:45,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:46,190 INFO [master/387b213c044a:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-23T19:39:46,190 INFO [master/387b213c044a:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-23T19:39:46,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:46,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:39:46,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:46,782 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 26adb307ab68e8fcb9782771920a2811 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-23T19:39:46,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/608673df7413446b9d607fba0b6c684e is 1080, key is row0180/info:/1732390776689/Put/seqid=0 2024-11-23T19:39:46,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741868_1044 (size=17906) 2024-11-23T19:39:46,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741868_1044 (size=17906) 2024-11-23T19:39:46,795 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/608673df7413446b9d607fba0b6c684e 2024-11-23T19:39:46,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/608673df7413446b9d607fba0b6c684e as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/608673df7413446b9d607fba0b6c684e 2024-11-23T19:39:46,805 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/608673df7413446b9d607fba0b6c684e, entries=12, sequenceid=253, filesize=17.5 K 2024-11-23T19:39:46,806 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=1.05 KB/1076 for 26adb307ab68e8fcb9782771920a2811 in 24ms, sequenceid=253, compaction requested=true 2024-11-23T19:39:46,806 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:46,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26adb307ab68e8fcb9782771920a2811:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T19:39:46,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:46,806 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T19:39:46,807 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 155809 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-11-23T19:39:46,807 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1541): 26adb307ab68e8fcb9782771920a2811/info is initiating minor compaction (all files) 2024-11-23T19:39:46,807 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 26adb307ab68e8fcb9782771920a2811/info in TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811. 2024-11-23T19:39:46,807 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/7cd4a43419184b37a263ee20f1d8d272, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/e59c79f35aa340b9a36696f651d7713f, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/608673df7413446b9d607fba0b6c684e] into tmpdir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp, totalSize=152.2 K 2024-11-23T19:39:46,808 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7cd4a43419184b37a263ee20f1d8d272, keycount=100, bloomtype=ROW, size=110.8 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732390758369 2024-11-23T19:39:46,808 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting e59c79f35aa340b9a36696f651d7713f, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732390776649 2024-11-23T19:39:46,808 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 608673df7413446b9d607fba0b6c684e, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732390776689 2024-11-23T19:39:46,819 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26adb307ab68e8fcb9782771920a2811#info#compaction#84 average throughput is 44.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T19:39:46,819 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/6e8787ccdd7a477a96df4a9eaad44e36 is 1080, key is row0062/info:/1732390758369/Put/seqid=0 2024-11-23T19:39:46,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741869_1045 (size=146156) 2024-11-23T19:39:46,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741869_1045 (size=146156) 2024-11-23T19:39:46,828 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/6e8787ccdd7a477a96df4a9eaad44e36 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/6e8787ccdd7a477a96df4a9eaad44e36 2024-11-23T19:39:46,833 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 26adb307ab68e8fcb9782771920a2811/info of 26adb307ab68e8fcb9782771920a2811 into 6e8787ccdd7a477a96df4a9eaad44e36(size=142.7 K), total size for store is 142.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T19:39:46,833 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:46,834 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811., storeName=26adb307ab68e8fcb9782771920a2811/info, priority=13, startTime=1732390786806; duration=0sec 2024-11-23T19:39:46,834 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:46,834 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26adb307ab68e8fcb9782771920a2811:info 2024-11-23T19:39:47,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:47,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:48,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:48,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:48,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:48,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 26adb307ab68e8fcb9782771920a2811 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T19:39:48,808 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/aa3b270e2d48407b87d12c76b24b853c is 1080, key is row0192/info:/1732390786785/Put/seqid=0 2024-11-23T19:39:48,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741870_1046 (size=12522) 2024-11-23T19:39:48,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741870_1046 (size=12522) 2024-11-23T19:39:48,815 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=264 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/aa3b270e2d48407b87d12c76b24b853c 2024-11-23T19:39:48,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/aa3b270e2d48407b87d12c76b24b853c as 
hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/aa3b270e2d48407b87d12c76b24b853c 2024-11-23T19:39:48,826 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/aa3b270e2d48407b87d12c76b24b853c, entries=7, sequenceid=264, filesize=12.2 K 2024-11-23T19:39:48,827 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 26adb307ab68e8fcb9782771920a2811 in 23ms, sequenceid=264, compaction requested=false 2024-11-23T19:39:48,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:48,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:48,827 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 26adb307ab68e8fcb9782771920a2811 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-23T19:39:48,831 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/8d736b748fe743e4942b18d8072f14d9 is 1080, key is row0199/info:/1732390788805/Put/seqid=0 2024-11-23T19:39:48,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741871_1047 (size=17918) 2024-11-23T19:39:48,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741871_1047 (size=17918) 2024-11-23T19:39:48,836 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/8d736b748fe743e4942b18d8072f14d9 2024-11-23T19:39:48,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/8d736b748fe743e4942b18d8072f14d9 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/8d736b748fe743e4942b18d8072f14d9 2024-11-23T19:39:48,846 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/8d736b748fe743e4942b18d8072f14d9, entries=12, sequenceid=279, filesize=17.5 K 2024-11-23T19:39:48,847 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 26adb307ab68e8fcb9782771920a2811 in 20ms, sequenceid=279, compaction requested=true 2024-11-23T19:39:48,847 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:48,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26adb307ab68e8fcb9782771920a2811:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T19:39:48,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:48,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:48,847 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T19:39:48,847 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 26adb307ab68e8fcb9782771920a2811 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-23T19:39:48,848 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 176596 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T19:39:48,848 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1541): 26adb307ab68e8fcb9782771920a2811/info is initiating minor compaction (all files) 2024-11-23T19:39:48,848 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 26adb307ab68e8fcb9782771920a2811/info in TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811. 2024-11-23T19:39:48,848 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/6e8787ccdd7a477a96df4a9eaad44e36, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/aa3b270e2d48407b87d12c76b24b853c, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/8d736b748fe743e4942b18d8072f14d9] into tmpdir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp, totalSize=172.5 K 2024-11-23T19:39:48,849 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6e8787ccdd7a477a96df4a9eaad44e36, keycount=130, bloomtype=ROW, size=142.7 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732390758369 2024-11-23T19:39:48,849 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting aa3b270e2d48407b87d12c76b24b853c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1732390786785 2024-11-23T19:39:48,850 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8d736b748fe743e4942b18d8072f14d9, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732390788805 2024-11-23T19:39:48,851 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/a58b2a9393364836b2388e08075676f7 is 1080, key is row0211/info:/1732390788828/Put/seqid=0 2024-11-23T19:39:48,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741872_1048 (size=16839) 2024-11-23T19:39:48,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741872_1048 (size=16839) 2024-11-23T19:39:48,857 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/a58b2a9393364836b2388e08075676f7 2024-11-23T19:39:48,862 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26adb307ab68e8fcb9782771920a2811#info#compaction#88 average throughput is 50.97 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T19:39:48,862 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/021740db59fe42968437aadde8100bff is 1080, key is row0062/info:/1732390758369/Put/seqid=0 2024-11-23T19:39:48,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/a58b2a9393364836b2388e08075676f7 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/a58b2a9393364836b2388e08075676f7 2024-11-23T19:39:48,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741873_1049 (size=166762) 2024-11-23T19:39:48,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741873_1049 (size=166762) 2024-11-23T19:39:48,868 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/a58b2a9393364836b2388e08075676f7, entries=11, sequenceid=293, filesize=16.4 K 2024-11-23T19:39:48,869 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=3.15 KB/3228 for 26adb307ab68e8fcb9782771920a2811 in 22ms, sequenceid=293, compaction requested=false 2024-11-23T19:39:48,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:48,870 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/021740db59fe42968437aadde8100bff as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/021740db59fe42968437aadde8100bff 2024-11-23T19:39:48,875 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 26adb307ab68e8fcb9782771920a2811/info of 26adb307ab68e8fcb9782771920a2811 into 021740db59fe42968437aadde8100bff(size=162.9 K), total size for store is 179.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T19:39:48,875 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:48,875 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811., storeName=26adb307ab68e8fcb9782771920a2811/info, priority=13, startTime=1732390788847; duration=0sec 2024-11-23T19:39:48,875 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:48,875 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26adb307ab68e8fcb9782771920a2811:info 2024-11-23T19:39:49,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:49,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:50,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:50,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:50,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:50,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 26adb307ab68e8fcb9782771920a2811 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-23T19:39:50,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/625bacf3aca24c01a5cbc2961192fcf6 is 1080, key is row0222/info:/1732390788848/Put/seqid=0 2024-11-23T19:39:50,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741874_1050 (size=12523) 2024-11-23T19:39:50,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741874_1050 (size=12523) 2024-11-23T19:39:50,879 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/625bacf3aca24c01a5cbc2961192fcf6 2024-11-23T19:39:50,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/625bacf3aca24c01a5cbc2961192fcf6 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/625bacf3aca24c01a5cbc2961192fcf6 2024-11-23T19:39:50,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/625bacf3aca24c01a5cbc2961192fcf6, entries=7, sequenceid=304, filesize=12.2 K 2024-11-23T19:39:50,893 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 26adb307ab68e8fcb9782771920a2811 in 26ms, sequenceid=304, compaction requested=true 2024-11-23T19:39:50,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:50,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26adb307ab68e8fcb9782771920a2811:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T19:39:50,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:50,894 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T19:39:50,895 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 196124 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T19:39:50,895 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1541): 26adb307ab68e8fcb9782771920a2811/info is initiating minor compaction (all files) 2024-11-23T19:39:50,895 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 26adb307ab68e8fcb9782771920a2811/info in TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811. 2024-11-23T19:39:50,895 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/021740db59fe42968437aadde8100bff, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/a58b2a9393364836b2388e08075676f7, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/625bacf3aca24c01a5cbc2961192fcf6] into tmpdir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp, totalSize=191.5 K 2024-11-23T19:39:50,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:50,895 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 26adb307ab68e8fcb9782771920a2811 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-23T19:39:50,896 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 021740db59fe42968437aadde8100bff, keycount=149, bloomtype=ROW, size=162.9 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732390758369 2024-11-23T19:39:50,896 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting a58b2a9393364836b2388e08075676f7, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732390788828 2024-11-23T19:39:50,896 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] compactions.Compactor(225): Compacting 625bacf3aca24c01a5cbc2961192fcf6, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1732390788848 2024-11-23T19:39:50,900 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/d1b4da00b17f421187ce38451d1e9dba is 1080, key is row0229/info:/1732390790868/Put/seqid=0 2024-11-23T19:39:50,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to 
blk_1073741875_1051 (size=17918) 2024-11-23T19:39:50,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741875_1051 (size=17918) 2024-11-23T19:39:50,905 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/d1b4da00b17f421187ce38451d1e9dba 2024-11-23T19:39:50,909 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26adb307ab68e8fcb9782771920a2811#info#compaction#91 average throughput is 57.12 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T19:39:50,909 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/39cf4d05de2e49b0a4e995de84b756a8 is 1080, key is row0062/info:/1732390758369/Put/seqid=0 2024-11-23T19:39:50,911 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/d1b4da00b17f421187ce38451d1e9dba as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/d1b4da00b17f421187ce38451d1e9dba 2024-11-23T19:39:50,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741876_1052 (size=186290) 2024-11-23T19:39:50,917 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/d1b4da00b17f421187ce38451d1e9dba, entries=12, sequenceid=319, filesize=17.5 K 2024-11-23T19:39:50,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741876_1052 (size=186290) 2024-11-23T19:39:50,918 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 26adb307ab68e8fcb9782771920a2811 in 23ms, sequenceid=319, compaction requested=false 2024-11-23T19:39:50,918 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:50,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41609 {}] regionserver.HRegion(8855): Flush requested on 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:50,919 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 26adb307ab68e8fcb9782771920a2811 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-23T19:39:50,922 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/83478127f9634d229580f71314f1a4fe is 1080, key is row0241/info:/1732390790897/Put/seqid=0 2024-11-23T19:39:50,923 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/39cf4d05de2e49b0a4e995de84b756a8 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/39cf4d05de2e49b0a4e995de84b756a8 2024-11-23T19:39:50,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741877_1053 (size=16839) 2024-11-23T19:39:50,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741877_1053 (size=16839) 2024-11-23T19:39:50,927 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/83478127f9634d229580f71314f1a4fe 2024-11-23T19:39:50,929 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 26adb307ab68e8fcb9782771920a2811/info of 26adb307ab68e8fcb9782771920a2811 into 39cf4d05de2e49b0a4e995de84b756a8(size=181.9 K), total size for store is 199.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T19:39:50,929 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:50,929 INFO [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811., storeName=26adb307ab68e8fcb9782771920a2811/info, priority=13, startTime=1732390790894; duration=0sec 2024-11-23T19:39:50,929 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:50,929 DEBUG [RS:0;387b213c044a:41609-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26adb307ab68e8fcb9782771920a2811:info 2024-11-23T19:39:50,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/83478127f9634d229580f71314f1a4fe as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/83478127f9634d229580f71314f1a4fe 2024-11-23T19:39:50,935 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/83478127f9634d229580f71314f1a4fe, entries=11, sequenceid=333, filesize=16.4 K 2024-11-23T19:39:50,936 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=5.25 KB/5380 for 26adb307ab68e8fcb9782771920a2811 in 17ms, sequenceid=333, compaction requested=true 2024-11-23T19:39:50,936 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:50,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26adb307ab68e8fcb9782771920a2811:info, priority=-2147483648, current under compaction store size is 1 2024-11-23T19:39:50,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:50,936 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T19:39:50,937 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 221047 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T19:39:50,937 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] regionserver.HStore(1541): 26adb307ab68e8fcb9782771920a2811/info is initiating minor compaction (all files) 2024-11-23T19:39:50,937 INFO [RS:0;387b213c044a:41609-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 26adb307ab68e8fcb9782771920a2811/info in TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811. 
2024-11-23T19:39:50,937 INFO [RS:0;387b213c044a:41609-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/39cf4d05de2e49b0a4e995de84b756a8, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/d1b4da00b17f421187ce38451d1e9dba, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/83478127f9634d229580f71314f1a4fe] into tmpdir=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp, totalSize=215.9 K 2024-11-23T19:39:50,938 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] compactions.Compactor(225): Compacting 39cf4d05de2e49b0a4e995de84b756a8, keycount=167, bloomtype=ROW, size=181.9 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1732390758369 2024-11-23T19:39:50,938 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] compactions.Compactor(225): Compacting d1b4da00b17f421187ce38451d1e9dba, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732390790868 2024-11-23T19:39:50,938 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] compactions.Compactor(225): Compacting 83478127f9634d229580f71314f1a4fe, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732390790897 2024-11-23T19:39:50,949 INFO [RS:0;387b213c044a:41609-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26adb307ab68e8fcb9782771920a2811#info#compaction#93 average throughput is 64.99 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T19:39:50,949 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/cd463afff15e43e682f719f73b7146d6 is 1080, key is row0062/info:/1732390758369/Put/seqid=0 2024-11-23T19:39:50,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741878_1054 (size=211286) 2024-11-23T19:39:50,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741878_1054 (size=211286) 2024-11-23T19:39:50,957 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/cd463afff15e43e682f719f73b7146d6 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/cd463afff15e43e682f719f73b7146d6 2024-11-23T19:39:50,963 INFO [RS:0;387b213c044a:41609-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 26adb307ab68e8fcb9782771920a2811/info of 26adb307ab68e8fcb9782771920a2811 into cd463afff15e43e682f719f73b7146d6(size=206.3 K), total size for store is 206.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T19:39:50,963 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:50,963 INFO [RS:0;387b213c044a:41609-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811., storeName=26adb307ab68e8fcb9782771920a2811/info, priority=13, startTime=1732390790936; duration=0sec 2024-11-23T19:39:50,963 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T19:39:50,963 DEBUG [RS:0;387b213c044a:41609-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26adb307ab68e8fcb9782771920a2811:info 2024-11-23T19:39:51,022 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-23T19:39:51,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:51,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:52,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:52,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:52,930 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-23T19:39:52,931 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C41609%2C1732390744769.1732390792931 2024-11-23T19:39:52,955 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:52,955 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:52,956 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:52,956 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:52,956 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:52,956 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/WALs/387b213c044a,41609,1732390744769/387b213c044a%2C41609%2C1732390744769.1732390745439 with entries=318, filesize=310.38 KB; new WAL /user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/WALs/387b213c044a,41609,1732390744769/387b213c044a%2C41609%2C1732390744769.1732390792931 2024-11-23T19:39:52,957 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41403:41403),(127.0.0.1/127.0.0.1:43601:43601)] 2024-11-23T19:39:52,957 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/WALs/387b213c044a,41609,1732390744769/387b213c044a%2C41609%2C1732390744769.1732390745439 is not closed yet, will try archiving it next time 2024-11-23T19:39:52,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741832_1008 (size=317837) 2024-11-23T19:39:52,958 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741832_1008 (size=317837) 2024-11-23T19:39:52,961 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 26adb307ab68e8fcb9782771920a2811 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-11-23T19:39:52,965 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/835297dd0f3f4d22813dbe13de17274b is 1080, key is row0252/info:/1732390790920/Put/seqid=0 2024-11-23T19:39:52,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741880_1056 (size=10357) 2024-11-23T19:39:52,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741880_1056 (size=10357) 2024-11-23T19:39:52,970 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/835297dd0f3f4d22813dbe13de17274b 2024-11-23T19:39:52,976 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/.tmp/info/835297dd0f3f4d22813dbe13de17274b as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/835297dd0f3f4d22813dbe13de17274b 2024-11-23T19:39:52,982 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/835297dd0f3f4d22813dbe13de17274b, entries=5, sequenceid=343, filesize=10.1 K 2024-11-23T19:39:52,983 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for 26adb307ab68e8fcb9782771920a2811 in 22ms, sequenceid=343, compaction requested=false 2024-11-23T19:39:52,983 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 26adb307ab68e8fcb9782771920a2811: 2024-11-23T19:39:52,983 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-23T19:39:52,987 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/.tmp/info/7d81bffeecdc48208be107ce5d382435 is 193, key is TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811./info:regioninfo/1732390761182/Put/seqid=0 2024-11-23T19:39:52,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741881_1057 (size=6223) 2024-11-23T19:39:52,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741881_1057 (size=6223) 
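The flush sequence just above follows the write-then-commit pattern: the memstore is first written to an HFile under the region's `.tmp` directory (`DefaultStoreFlusher ... to=.../.tmp/info/...`) and only then moved into the live column-family directory (`HRegionFileSystem(442): Committing ... as ...`) before the flush is reported finished. A minimal sketch of that pattern on HDFS follows; the paths are illustrative and this is not HBase's internal flush code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only: write a new store file under .tmp, then move it into the live
// column-family directory, mirroring the "Committing .tmp/... as info/..." entries
// in the log. Paths are illustrative, not HBase's real layout helpers.
public class TmpThenCommitSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());

    Path tmpFile = new Path("/hbase-data/region/.tmp/info/835297dd0f3f4d22813dbe13de17274b");
    Path finalFile = new Path("/hbase-data/region/info/835297dd0f3f4d22813dbe13de17274b");

    // 1. Write the flushed data to the temporary location first.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.writeBytes("flushed cells would go here");
    }

    // 2. Commit: a same-filesystem rename is cheap on HDFS, so readers never
    //    observe a half-written store file in the live directory.
    if (!fs.rename(tmpFile, finalFile)) {
      throw new java.io.IOException("commit (rename) failed for " + tmpFile);
    }
  }
}
```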
2024-11-23T19:39:52,992 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/.tmp/info/7d81bffeecdc48208be107ce5d382435 2024-11-23T19:39:52,998 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/.tmp/info/7d81bffeecdc48208be107ce5d382435 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/info/7d81bffeecdc48208be107ce5d382435 2024-11-23T19:39:53,003 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/info/7d81bffeecdc48208be107ce5d382435, entries=5, sequenceid=21, filesize=6.1 K 2024-11-23T19:39:53,004 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 21ms, sequenceid=21, compaction requested=false 2024-11-23T19:39:53,004 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-23T19:39:53,004 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for d94ef5b652f6001b9acee3e178b4f374: 2024-11-23T19:39:53,004 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C41609%2C1732390744769.1732390793004 2024-11-23T19:39:53,009 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,009 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,009 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,009 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,009 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,009 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/WALs/387b213c044a,41609,1732390744769/387b213c044a%2C41609%2C1732390744769.1732390792931 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/WALs/387b213c044a,41609,1732390744769/387b213c044a%2C41609%2C1732390744769.1732390793004 2024-11-23T19:39:53,010 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41403:41403),(127.0.0.1/127.0.0.1:43601:43601)] 2024-11-23T19:39:53,010 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/WALs/387b213c044a,41609,1732390744769/387b213c044a%2C41609%2C1732390744769.1732390792931 is not closed yet, will try archiving it next time 2024-11-23T19:39:53,010 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/WALs/387b213c044a,41609,1732390744769/387b213c044a%2C41609%2C1732390744769.1732390745439 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/oldWALs/387b213c044a%2C41609%2C1732390744769.1732390745439 2024-11-23T19:39:53,010 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T19:39:53,011 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741879_1055 (size=731) 2024-11-23T19:39:53,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741879_1055 (size=731) 2024-11-23T19:39:53,011 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/WALs/387b213c044a,41609,1732390744769/387b213c044a%2C41609%2C1732390744769.1732390792931 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/oldWALs/387b213c044a%2C41609%2C1732390744769.1732390792931 2024-11-23T19:39:53,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-23T19:39:53,111 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-23T19:39:53,111 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:39:53,111 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:39:53,111 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:39:53,111 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T19:39:53,111 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T19:39:53,111 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=777264955, stopped=false 2024-11-23T19:39:53,112 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=387b213c044a,42085,1732390744553 2024-11-23T19:39:53,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T19:39:53,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T19:39:53,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:53,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:53,367 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T19:39:53,368 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
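The tearDown call stack above shows the JUnit 4 @After hook of AbstractTestLogRolling driving HBaseTestingUtil.shutdownMiniCluster, which is what produces the "Shutting down minicluster" and connection-close entries. A sketch of that test lifecycle, assuming the usual startMiniCluster/shutdownMiniCluster pair on HBaseTestingUtil; the real test class's setup differs in detail.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

// Sketch of the lifecycle implied by the stack trace above: an in-process HBase
// minicluster is started before the test and torn down after it. Class and method
// names follow the trace; exact signatures in the real test may differ.
public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    testUtil.startMiniCluster();     // boots HDFS, ZooKeeper and HBase in one JVM
  }

  @After
  public void tearDown() throws Exception {
    testUtil.shutdownMiniCluster();  // source of the "Shutting down minicluster" entries
  }

  @Test
  public void writesSurviveLogRolling() throws Exception {
    // test body elided; the log above comes from a WAL-rolling test of this shape
  }
}
```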
2024-11-23T19:39:53,368 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:39:53,369 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:39:53,369 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:39:53,369 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:39:53,370 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '387b213c044a,41609,1732390744769' ***** 2024-11-23T19:39:53,370 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T19:39:53,371 INFO [RS:0;387b213c044a:41609 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T19:39:53,371 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T19:39:53,371 INFO [RS:0;387b213c044a:41609 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T19:39:53,372 INFO [RS:0;387b213c044a:41609 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T19:39:53,372 INFO [RS:0;387b213c044a:41609 {}] regionserver.HRegionServer(3091): Received CLOSE for 26adb307ab68e8fcb9782771920a2811 2024-11-23T19:39:53,372 INFO [RS:0;387b213c044a:41609 {}] regionserver.HRegionServer(3091): Received CLOSE for d94ef5b652f6001b9acee3e178b4f374 2024-11-23T19:39:53,372 INFO [RS:0;387b213c044a:41609 {}] regionserver.HRegionServer(959): stopping server 387b213c044a,41609,1732390744769 2024-11-23T19:39:53,372 INFO [RS:0;387b213c044a:41609 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T19:39:53,372 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 26adb307ab68e8fcb9782771920a2811, disabling compactions & flushes 2024-11-23T19:39:53,372 INFO [RS:0;387b213c044a:41609 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;387b213c044a:41609. 2024-11-23T19:39:53,372 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811. 2024-11-23T19:39:53,372 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811. 
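The ZooKeeper entries above show how the shutdown is fanned out: deleting the /hbase/running znode fires a NodeDeleted watch on both the master and regionserver sessions, and each server re-registers its watcher ("Set watcher on znode that does not yet exist") before stopping. The sketch below illustrates that watch pattern with the plain ZooKeeper client rather than HBase's ZKWatcher/ZKUtil wrappers; the connection string and path are taken from the log, everything else is hypothetical.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Illustration of the "watch the running znode" shutdown signal seen above.
public class RunningZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    final CountDownLatch clusterUp = new CountDownLatch(1);
    final String runningZnode = "/hbase/running";

    ZooKeeper zk = new ZooKeeper("127.0.0.1:63840", 30_000, event -> {
      // NodeDeleted on /hbase/running is the cluster-wide "please shut down" signal.
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && runningZnode.equals(event.getPath())) {
        clusterUp.countDown();
      }
    });

    // exists(path, true) registers the default watcher even if the znode is absent,
    // matching the "Set watcher on znode that does not yet exist" entries above.
    zk.exists(runningZnode, true);

    clusterUp.await();   // returns once the znode is deleted
    System.out.println("running znode deleted, starting shutdown");
    zk.close();
  }
}
```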
2024-11-23T19:39:53,372 DEBUG [RS:0;387b213c044a:41609 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:39:53,372 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811. after waiting 0 ms 2024-11-23T19:39:53,372 DEBUG [RS:0;387b213c044a:41609 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:39:53,372 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811. 2024-11-23T19:39:53,372 INFO [RS:0;387b213c044a:41609 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T19:39:53,372 INFO [RS:0;387b213c044a:41609 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T19:39:53,372 INFO [RS:0;387b213c044a:41609 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
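The region-close entries above ("Time limited wait for close lock", "Acquired close lock ... after waiting 0 ms", "Updates disabled for region") describe a close that waits out in-flight writes before disabling updates. The snippet below is a generic illustration of that lock pattern with a JDK read-write lock, not HRegion's actual implementation.

```java
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Generic illustration: writers share the read lock, close() takes the write lock
// with a bounded wait, then disables further updates. This is the pattern only.
public class CloseLockSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private volatile boolean closed;

  public void put(String row, String value) {
    lock.readLock().lock();                 // many writers may hold this concurrently
    try {
      if (closed) {
        throw new IllegalStateException("region already closed");
      }
      // apply the edit to the memstore here
    } finally {
      lock.readLock().unlock();
    }
  }

  public boolean close(long timeout, TimeUnit unit) throws InterruptedException {
    // Bounded wait: if writers do not drain in time, give up instead of blocking forever.
    if (!lock.writeLock().tryLock(timeout, unit)) {
      return false;
    }
    try {
      closed = true;                        // "Updates disabled for region"
      // flush and close store files here
      return true;
    } finally {
      lock.writeLock().unlock();
    }
  }
}
```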
2024-11-23T19:39:53,372 INFO [RS:0;387b213c044a:41609 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-23T19:39:53,373 INFO [RS:0;387b213c044a:41609 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-23T19:39:53,373 DEBUG [RS:0;387b213c044a:41609 {}] regionserver.HRegionServer(1325): Online Regions={26adb307ab68e8fcb9782771920a2811=TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811., 1588230740=hbase:meta,,1.1588230740, d94ef5b652f6001b9acee3e178b4f374=TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374.} 2024-11-23T19:39:53,373 DEBUG [RS:0;387b213c044a:41609 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 26adb307ab68e8fcb9782771920a2811, d94ef5b652f6001b9acee3e178b4f374 2024-11-23T19:39:53,373 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T19:39:53,373 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T19:39:53,373 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T19:39:53,373 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T19:39:53,373 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T19:39:53,373 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/5c17896b20864efba2cbd43b3dc79c31.bd1ea0efe9f4783ac621c9c9a4216151->hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/5c17896b20864efba2cbd43b3dc79c31-top, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/TestLogRolling-testLogRolling=bd1ea0efe9f4783ac621c9c9a4216151-fdc06ef66c764111836cf42ffc20cb56, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/88266589d4544d87be74f5c71b1093c0, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/TestLogRolling-testLogRolling=bd1ea0efe9f4783ac621c9c9a4216151-bf8cf6dd8935491eba3d0449a6964ae2, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/fb186d23b75648dfaa099f6e3cc2e6a5, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/81c6d1c4b23d48d0a419dff52a3ff838, 
hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/5875f90489fd48a58dbfaebed3d33318, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/448d9e49321d4cd591afe9663e6bfa4b, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/aadb53d088dc4814b00b7031b7d0ecbf, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/7a979eefec4848d993eca22646923841, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/63acbd560ca14311a30a2b16fe8f5175, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/7cd4a43419184b37a263ee20f1d8d272, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/d28093bd36ec4cf79ac1e02a27c44576, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/e59c79f35aa340b9a36696f651d7713f, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/6e8787ccdd7a477a96df4a9eaad44e36, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/608673df7413446b9d607fba0b6c684e, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/aa3b270e2d48407b87d12c76b24b853c, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/021740db59fe42968437aadde8100bff, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/8d736b748fe743e4942b18d8072f14d9, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/a58b2a9393364836b2388e08075676f7, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/39cf4d05de2e49b0a4e995de84b756a8, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/625bacf3aca24c01a5cbc2961192fcf6, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/d1b4da00b17f421187ce38451d1e9dba, 
hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/83478127f9634d229580f71314f1a4fe] to archive 2024-11-23T19:39:53,374 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T19:39:53,376 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/5c17896b20864efba2cbd43b3dc79c31.bd1ea0efe9f4783ac621c9c9a4216151 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/5c17896b20864efba2cbd43b3dc79c31.bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:53,377 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/TestLogRolling-testLogRolling=bd1ea0efe9f4783ac621c9c9a4216151-fdc06ef66c764111836cf42ffc20cb56 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/TestLogRolling-testLogRolling=bd1ea0efe9f4783ac621c9c9a4216151-fdc06ef66c764111836cf42ffc20cb56 2024-11-23T19:39:53,377 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-23T19:39:53,378 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T19:39:53,378 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T19:39:53,378 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732390793373Running coprocessor pre-close hooks at 1732390793373Disabling compacts and flushes for region at 1732390793373Disabling writes for close at 1732390793373Writing region close event to WAL at 1732390793374 (+1 ms)Running coprocessor post-close hooks at 1732390793378 (+4 ms)Closed at 1732390793378 2024-11-23T19:39:53,378 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T19:39:53,378 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/88266589d4544d87be74f5c71b1093c0 to 
hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/88266589d4544d87be74f5c71b1093c0 2024-11-23T19:39:53,379 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/TestLogRolling-testLogRolling=bd1ea0efe9f4783ac621c9c9a4216151-bf8cf6dd8935491eba3d0449a6964ae2 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/TestLogRolling-testLogRolling=bd1ea0efe9f4783ac621c9c9a4216151-bf8cf6dd8935491eba3d0449a6964ae2 2024-11-23T19:39:53,380 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/fb186d23b75648dfaa099f6e3cc2e6a5 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/fb186d23b75648dfaa099f6e3cc2e6a5 2024-11-23T19:39:53,381 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/81c6d1c4b23d48d0a419dff52a3ff838 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/81c6d1c4b23d48d0a419dff52a3ff838 2024-11-23T19:39:53,382 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/5875f90489fd48a58dbfaebed3d33318 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/5875f90489fd48a58dbfaebed3d33318 2024-11-23T19:39:53,383 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/448d9e49321d4cd591afe9663e6bfa4b to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/448d9e49321d4cd591afe9663e6bfa4b 2024-11-23T19:39:53,384 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/aadb53d088dc4814b00b7031b7d0ecbf to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/aadb53d088dc4814b00b7031b7d0ecbf 2024-11-23T19:39:53,385 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/7a979eefec4848d993eca22646923841 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/7a979eefec4848d993eca22646923841 2024-11-23T19:39:53,386 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/63acbd560ca14311a30a2b16fe8f5175 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/63acbd560ca14311a30a2b16fe8f5175 2024-11-23T19:39:53,387 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/7cd4a43419184b37a263ee20f1d8d272 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/7cd4a43419184b37a263ee20f1d8d272 2024-11-23T19:39:53,388 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/d28093bd36ec4cf79ac1e02a27c44576 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/d28093bd36ec4cf79ac1e02a27c44576 2024-11-23T19:39:53,389 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/e59c79f35aa340b9a36696f651d7713f to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/e59c79f35aa340b9a36696f651d7713f 2024-11-23T19:39:53,390 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/6e8787ccdd7a477a96df4a9eaad44e36 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/6e8787ccdd7a477a96df4a9eaad44e36 2024-11-23T19:39:53,391 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/608673df7413446b9d607fba0b6c684e to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/608673df7413446b9d607fba0b6c684e 2024-11-23T19:39:53,392 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/aa3b270e2d48407b87d12c76b24b853c to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/aa3b270e2d48407b87d12c76b24b853c 2024-11-23T19:39:53,392 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/021740db59fe42968437aadde8100bff to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/021740db59fe42968437aadde8100bff 2024-11-23T19:39:53,393 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/8d736b748fe743e4942b18d8072f14d9 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/8d736b748fe743e4942b18d8072f14d9 2024-11-23T19:39:53,394 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/a58b2a9393364836b2388e08075676f7 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/a58b2a9393364836b2388e08075676f7 2024-11-23T19:39:53,395 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/39cf4d05de2e49b0a4e995de84b756a8 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/39cf4d05de2e49b0a4e995de84b756a8 2024-11-23T19:39:53,396 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/625bacf3aca24c01a5cbc2961192fcf6 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/625bacf3aca24c01a5cbc2961192fcf6 2024-11-23T19:39:53,397 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/d1b4da00b17f421187ce38451d1e9dba to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/d1b4da00b17f421187ce38451d1e9dba 2024-11-23T19:39:53,398 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/83478127f9634d229580f71314f1a4fe to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/83478127f9634d229580f71314f1a4fe 2024-11-23T19:39:53,398 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=387b213c044a:42085 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-23T19:39:53,399 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [88266589d4544d87be74f5c71b1093c0=42984, fb186d23b75648dfaa099f6e3cc2e6a5=12516, 81c6d1c4b23d48d0a419dff52a3ff838=67948, 5875f90489fd48a58dbfaebed3d33318=22238, 448d9e49321d4cd591afe9663e6bfa4b=15750, aadb53d088dc4814b00b7031b7d0ecbf=91843, 7a979eefec4848d993eca22646923841=17906, 63acbd560ca14311a30a2b16fe8f5175=16828, 7cd4a43419184b37a263ee20f1d8d272=113509, d28093bd36ec4cf79ac1e02a27c44576=14672, e59c79f35aa340b9a36696f651d7713f=24394, 6e8787ccdd7a477a96df4a9eaad44e36=146156, 608673df7413446b9d607fba0b6c684e=17906, aa3b270e2d48407b87d12c76b24b853c=12522, 021740db59fe42968437aadde8100bff=166762, 8d736b748fe743e4942b18d8072f14d9=17918, a58b2a9393364836b2388e08075676f7=16839, 39cf4d05de2e49b0a4e995de84b756a8=186290, 625bacf3aca24c01a5cbc2961192fcf6=12523, d1b4da00b17f421187ce38451d1e9dba=17918, 83478127f9634d229580f71314f1a4fe=16839] 2024-11-23T19:39:53,402 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/recovered.edits/346.seqid, newMaxSeqId=346, maxSeqId=130 2024-11-23T19:39:53,402 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811. 2024-11-23T19:39:53,402 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 26adb307ab68e8fcb9782771920a2811: Waiting for close lock at 1732390793372Running coprocessor pre-close hooks at 1732390793372Disabling compacts and flushes for region at 1732390793372Disabling writes for close at 1732390793372Writing region close event to WAL at 1732390793399 (+27 ms)Running coprocessor post-close hooks at 1732390793402 (+3 ms)Closed at 1732390793402 2024-11-23T19:39:53,402 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732390760469.26adb307ab68e8fcb9782771920a2811. 2024-11-23T19:39:53,402 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d94ef5b652f6001b9acee3e178b4f374, disabling compactions & flushes 2024-11-23T19:39:53,402 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374. 2024-11-23T19:39:53,402 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374. 2024-11-23T19:39:53,403 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374. 
after waiting 1 ms 2024-11-23T19:39:53,403 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374. 2024-11-23T19:39:53,403 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/d94ef5b652f6001b9acee3e178b4f374/info/5c17896b20864efba2cbd43b3dc79c31.bd1ea0efe9f4783ac621c9c9a4216151->hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/bd1ea0efe9f4783ac621c9c9a4216151/info/5c17896b20864efba2cbd43b3dc79c31-bottom] to archive 2024-11-23T19:39:53,404 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T19:39:53,405 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/d94ef5b652f6001b9acee3e178b4f374/info/5c17896b20864efba2cbd43b3dc79c31.bd1ea0efe9f4783ac621c9c9a4216151 to hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/archive/data/default/TestLogRolling-testLogRolling/d94ef5b652f6001b9acee3e178b4f374/info/5c17896b20864efba2cbd43b3dc79c31.bd1ea0efe9f4783ac621c9c9a4216151 2024-11-23T19:39:53,405 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-23T19:39:53,408 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/data/default/TestLogRolling-testLogRolling/d94ef5b652f6001b9acee3e178b4f374/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130 2024-11-23T19:39:53,408 INFO [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374. 2024-11-23T19:39:53,408 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d94ef5b652f6001b9acee3e178b4f374: Waiting for close lock at 1732390793402Running coprocessor pre-close hooks at 1732390793402Disabling compacts and flushes for region at 1732390793402Disabling writes for close at 1732390793403 (+1 ms)Writing region close event to WAL at 1732390793405 (+2 ms)Running coprocessor post-close hooks at 1732390793408 (+3 ms)Closed at 1732390793408 2024-11-23T19:39:53,408 DEBUG [RS_CLOSE_REGION-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732390760469.d94ef5b652f6001b9acee3e178b4f374. 2024-11-23T19:39:53,573 INFO [RS:0;387b213c044a:41609 {}] regionserver.HRegionServer(976): stopping server 387b213c044a,41609,1732390744769; all regions closed. 
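The long HFileArchiver run above moves each compacted store file from the data tree to a mirrored location under the archive root (data/default/... becomes archive/data/default/...) before the region finishes closing; the reportFileArchival failure is a side effect of the RPC client already being stopped during shutdown, and the archiver logs it as retryable. A sketch of the path mirroring follows, using a root directory and file name taken from the log; the real archiver additionally handles name collisions and directory permissions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the data/ -> archive/data/ move visible in the HFileArchiver entries above.
public class ArchiveCompactedFileSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());

    Path rootDir = new Path("/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98");
    Path storeFile = new Path(rootDir,
        "data/default/TestLogRolling-testLogRolling/26adb307ab68e8fcb9782771920a2811/info/88266589d4544d87be74f5c71b1093c0");

    // Mirror the store file's path under <root>/archive/ instead of deleting it outright,
    // so snapshots and clones that still reference the file keep working.
    String relative = storeFile.toString().substring(rootDir.toString().length() + 1);
    Path archived = new Path(new Path(rootDir, "archive"), relative);

    fs.mkdirs(archived.getParent());
    if (!fs.rename(storeFile, archived)) {
      throw new java.io.IOException("failed to archive " + storeFile);
    }
    System.out.println("archived " + storeFile + " -> " + archived);
  }
}
```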
2024-11-23T19:39:53,574 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,575 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,575 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,575 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,576 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741834_1010 (size=8107) 2024-11-23T19:39:53,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741834_1010 (size=8107) 2024-11-23T19:39:53,586 DEBUG [RS:0;387b213c044a:41609 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/oldWALs 2024-11-23T19:39:53,586 INFO [RS:0;387b213c044a:41609 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 387b213c044a%2C41609%2C1732390744769.meta:.meta(num 1732390746002) 2024-11-23T19:39:53,586 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,586 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,587 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,587 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,587 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741882_1058 (size=780) 2024-11-23T19:39:53,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741882_1058 (size=780) 2024-11-23T19:39:53,591 DEBUG [RS:0;387b213c044a:41609 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/oldWALs 2024-11-23T19:39:53,591 INFO [RS:0;387b213c044a:41609 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 387b213c044a%2C41609%2C1732390744769:(num 1732390793004) 2024-11-23T19:39:53,592 DEBUG [RS:0;387b213c044a:41609 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:39:53,592 INFO [RS:0;387b213c044a:41609 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T19:39:53,592 INFO [RS:0;387b213c044a:41609 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T19:39:53,592 INFO [RS:0;387b213c044a:41609 {}] hbase.ChoreService(370): Chore service for: regionserver/387b213c044a:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T19:39:53,592 INFO [RS:0;387b213c044a:41609 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T19:39:53,592 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T19:39:53,592 INFO [RS:0;387b213c044a:41609 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41609 2024-11-23T19:39:53,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/387b213c044a,41609,1732390744769 2024-11-23T19:39:53,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T19:39:53,611 INFO [RS:0;387b213c044a:41609 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T19:39:53,621 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [387b213c044a,41609,1732390744769] 2024-11-23T19:39:53,632 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/387b213c044a,41609,1732390744769 already deleted, retry=false 2024-11-23T19:39:53,632 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 387b213c044a,41609,1732390744769 expired; onlineServers=0 2024-11-23T19:39:53,632 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '387b213c044a,42085,1732390744553' ***** 2024-11-23T19:39:53,632 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T19:39:53,632 INFO [M:0;387b213c044a:42085 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T19:39:53,632 INFO [M:0;387b213c044a:42085 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T19:39:53,633 DEBUG [M:0;387b213c044a:42085 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T19:39:53,633 DEBUG [M:0;387b213c044a:42085 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T19:39:53,633 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-23T19:39:53,633 DEBUG [master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390745175 {}] cleaner.HFileCleaner(306): Exit Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390745175,5,FailOnTimeoutGroup] 2024-11-23T19:39:53,633 DEBUG [master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390745172 {}] cleaner.HFileCleaner(306): Exit Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390745172,5,FailOnTimeoutGroup] 2024-11-23T19:39:53,634 INFO [M:0;387b213c044a:42085 {}] hbase.ChoreService(370): Chore service for: master/387b213c044a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-23T19:39:53,634 INFO [M:0;387b213c044a:42085 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T19:39:53,634 DEBUG [M:0;387b213c044a:42085 {}] master.HMaster(1795): Stopping service threads 2024-11-23T19:39:53,634 INFO [M:0;387b213c044a:42085 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T19:39:53,635 INFO [M:0;387b213c044a:42085 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T19:39:53,635 INFO [M:0;387b213c044a:42085 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T19:39:53,635 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-23T19:39:53,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:53,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:39:53,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T19:39:53,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:53,642 DEBUG [M:0;387b213c044a:42085 {}] zookeeper.ZKUtil(347): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T19:39:53,642 WARN [M:0;387b213c044a:42085 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T19:39:53,642 INFO [M:0;387b213c044a:42085 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/.lastflushedseqids 2024-11-23T19:39:53,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741883_1059 (size=228) 2024-11-23T19:39:53,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741883_1059 (size=228) 2024-11-23T19:39:53,648 INFO [M:0;387b213c044a:42085 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-23T19:39:53,648 INFO [M:0;387b213c044a:42085 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T19:39:53,648 DEBUG [M:0;387b213c044a:42085 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T19:39:53,648 INFO [M:0;387b213c044a:42085 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:39:53,648 DEBUG [M:0;387b213c044a:42085 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:39:53,648 DEBUG [M:0;387b213c044a:42085 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T19:39:53,648 DEBUG [M:0;387b213c044a:42085 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-23T19:39:53,648 INFO [M:0;387b213c044a:42085 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.36 KB 2024-11-23T19:39:53,662 DEBUG [M:0;387b213c044a:42085 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3a0e440d8af2464a84a34fb7584c53df is 82, key is hbase:meta,,1/info:regioninfo/1732390746042/Put/seqid=0 2024-11-23T19:39:53,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741884_1060 (size=5672) 2024-11-23T19:39:53,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741884_1060 (size=5672) 2024-11-23T19:39:53,666 INFO [M:0;387b213c044a:42085 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3a0e440d8af2464a84a34fb7584c53df 2024-11-23T19:39:53,683 DEBUG [M:0;387b213c044a:42085 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f9a02dbfdd33439fa41308dd88a19423 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732390746592/Put/seqid=0 2024-11-23T19:39:53,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741885_1061 (size=7089) 2024-11-23T19:39:53,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741885_1061 (size=7089) 2024-11-23T19:39:53,687 INFO [M:0;387b213c044a:42085 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f9a02dbfdd33439fa41308dd88a19423 2024-11-23T19:39:53,691 INFO [M:0;387b213c044a:42085 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f9a02dbfdd33439fa41308dd88a19423 2024-11-23T19:39:53,704 DEBUG [M:0;387b213c044a:42085 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/19a6371104994612a2b2c4c9e2bf9bf6 is 69, key is 387b213c044a,41609,1732390744769/rs:state/1732390745250/Put/seqid=0 2024-11-23T19:39:53,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741886_1062 (size=5156) 2024-11-23T19:39:53,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741886_1062 (size=5156) 2024-11-23T19:39:53,708 INFO [M:0;387b213c044a:42085 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), 
to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/19a6371104994612a2b2c4c9e2bf9bf6 2024-11-23T19:39:53,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:39:53,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41609-0x1016934ed750001, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:39:53,721 INFO [RS:0;387b213c044a:41609 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T19:39:53,721 INFO [RS:0;387b213c044a:41609 {}] regionserver.HRegionServer(1031): Exiting; stopping=387b213c044a,41609,1732390744769; zookeeper connection closed. 2024-11-23T19:39:53,722 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1c724254 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1c724254 2024-11-23T19:39:53,722 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-23T19:39:53,731 DEBUG [M:0;387b213c044a:42085 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/99535cc376604b97b91ae5bf15d20cca is 52, key is load_balancer_on/state:d/1732390746213/Put/seqid=0 2024-11-23T19:39:53,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741887_1063 (size=5056) 2024-11-23T19:39:53,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741887_1063 (size=5056) 2024-11-23T19:39:53,736 INFO [M:0;387b213c044a:42085 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/99535cc376604b97b91ae5bf15d20cca 2024-11-23T19:39:53,742 DEBUG [M:0;387b213c044a:42085 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3a0e440d8af2464a84a34fb7584c53df as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3a0e440d8af2464a84a34fb7584c53df 2024-11-23T19:39:53,746 INFO [M:0;387b213c044a:42085 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3a0e440d8af2464a84a34fb7584c53df, entries=8, sequenceid=125, filesize=5.5 K 2024-11-23T19:39:53,747 DEBUG [M:0;387b213c044a:42085 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f9a02dbfdd33439fa41308dd88a19423 as 
hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f9a02dbfdd33439fa41308dd88a19423 2024-11-23T19:39:53,752 INFO [M:0;387b213c044a:42085 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f9a02dbfdd33439fa41308dd88a19423 2024-11-23T19:39:53,752 INFO [M:0;387b213c044a:42085 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f9a02dbfdd33439fa41308dd88a19423, entries=13, sequenceid=125, filesize=6.9 K 2024-11-23T19:39:53,753 DEBUG [M:0;387b213c044a:42085 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/19a6371104994612a2b2c4c9e2bf9bf6 as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/19a6371104994612a2b2c4c9e2bf9bf6 2024-11-23T19:39:53,757 INFO [M:0;387b213c044a:42085 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/19a6371104994612a2b2c4c9e2bf9bf6, entries=1, sequenceid=125, filesize=5.0 K 2024-11-23T19:39:53,758 DEBUG [M:0;387b213c044a:42085 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/99535cc376604b97b91ae5bf15d20cca as hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/99535cc376604b97b91ae5bf15d20cca 2024-11-23T19:39:53,762 INFO [M:0;387b213c044a:42085 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41669/user/jenkins/test-data/c0be3f8d-b429-dc11-6d7d-1e50895cce98/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/99535cc376604b97b91ae5bf15d20cca, entries=1, sequenceid=125, filesize=4.9 K 2024-11-23T19:39:53,763 INFO [M:0;387b213c044a:42085 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=125, compaction requested=false 2024-11-23T19:39:53,765 INFO [M:0;387b213c044a:42085 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:39:53,765 DEBUG [M:0;387b213c044a:42085 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732390793648Disabling compacts and flushes for region at 1732390793648Disabling writes for close at 1732390793648Obtaining lock to block concurrent updates at 1732390793648Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732390793648Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1732390793649 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732390793649Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732390793649Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732390793661 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732390793661Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732390793670 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732390793682 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732390793683 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732390793691 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732390793703 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732390793703Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732390793712 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732390793730 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732390793730Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ae503ad: reopening flushed file at 1732390793741 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57935e5: reopening flushed file at 1732390793746 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@261d31f6: reopening flushed file at 1732390793752 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@62afc55: reopening flushed file at 1732390793757 (+5 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=125, compaction requested=false at 1732390793763 (+6 ms)Writing region close event to WAL at 1732390793765 (+2 ms)Closed at 1732390793765 2024-11-23T19:39:53,766 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,766 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,766 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,766 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,766 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:53,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42507 is added to blk_1073741830_1006 (size=61320) 2024-11-23T19:39:53,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46697 is added to blk_1073741830_1006 (size=61320) 2024-11-23T19:39:53,768 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T19:39:53,768 INFO [M:0;387b213c044a:42085 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-23T19:39:53,768 INFO [M:0;387b213c044a:42085 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42085 2024-11-23T19:39:53,769 INFO [M:0;387b213c044a:42085 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T19:39:53,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:39:53,879 INFO [M:0;387b213c044a:42085 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T19:39:53,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42085-0x1016934ed750000, quorum=127.0.0.1:63840, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:39:53,883 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@9caeb33{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:39:53,884 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1284b092{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:39:53,884 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:39:53,884 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25f949b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:39:53,884 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@692ba77d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/hadoop.log.dir/,STOPPED} 2024-11-23T19:39:53,888 WARN [BP-639699025-172.17.0.3-1732390742380 heartbeating to localhost/127.0.0.1:41669 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:39:53,888 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T19:39:53,888 WARN [BP-639699025-172.17.0.3-1732390742380 heartbeating to localhost/127.0.0.1:41669 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-639699025-172.17.0.3-1732390742380 (Datanode Uuid e9989444-cdec-419d-a323-ca0ed17f10ce) service to localhost/127.0.0.1:41669 2024-11-23T19:39:53,888 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:39:53,889 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/cluster_eff18020-c07d-7b25-d7ec-35b1cdebb6d0/data/data3/current/BP-639699025-172.17.0.3-1732390742380 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:39:53,889 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/cluster_eff18020-c07d-7b25-d7ec-35b1cdebb6d0/data/data4/current/BP-639699025-172.17.0.3-1732390742380 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:39:53,889 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:39:53,892 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@70d55230{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:39:53,893 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7986f193{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:39:53,893 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:39:53,893 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f53b1c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:39:53,893 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2001df3c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/hadoop.log.dir/,STOPPED} 2024-11-23T19:39:53,898 WARN [BP-639699025-172.17.0.3-1732390742380 heartbeating to localhost/127.0.0.1:41669 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:39:53,898 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T19:39:53,898 WARN [BP-639699025-172.17.0.3-1732390742380 heartbeating to localhost/127.0.0.1:41669 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-639699025-172.17.0.3-1732390742380 (Datanode Uuid 610b4960-f9b6-4ac6-95cd-a7145fad1416) service to localhost/127.0.0.1:41669 2024-11-23T19:39:53,898 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:39:53,898 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/cluster_eff18020-c07d-7b25-d7ec-35b1cdebb6d0/data/data1/current/BP-639699025-172.17.0.3-1732390742380 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:39:53,899 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/cluster_eff18020-c07d-7b25-d7ec-35b1cdebb6d0/data/data2/current/BP-639699025-172.17.0.3-1732390742380 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:39:53,899 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:39:53,905 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7cf57c4{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T19:39:53,905 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a67ff9c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:39:53,905 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:39:53,905 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f8818bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:39:53,905 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24350cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/hadoop.log.dir/,STOPPED} 2024-11-23T19:39:53,911 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-23T19:39:53,944 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-23T19:39:53,951 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=231 (was 205) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:41669 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41669 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/387b213c044a:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41669 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41669 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41669 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41669 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41669 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41669 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41669 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=512 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=211 (was 247), ProcessCount=11 (was 11), AvailableMemoryMB=3116 (was 2708) - AvailableMemoryMB LEAK? 
- 2024-11-23T19:39:53,959 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=231, OpenFileDescriptor=512, MaxFileDescriptor=1048576, SystemLoadAverage=211, ProcessCount=11, AvailableMemoryMB=3115 2024-11-23T19:39:53,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-23T19:39:53,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/hadoop.log.dir so I do NOT create it in target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e 2024-11-23T19:39:53,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e406aee6-6ea8-41a4-1b04-f44a435ac1e2/hadoop.tmp.dir so I do NOT create it in target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e 2024-11-23T19:39:53,959 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/cluster_a29a1960-be95-eaae-2f85-5d739400da92, deleteOnExit=true 2024-11-23T19:39:53,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-23T19:39:53,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/test.cache.data in system properties and HBase conf 2024-11-23T19:39:53,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T19:39:53,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/hadoop.log.dir in system properties and HBase conf 2024-11-23T19:39:53,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T19:39:53,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T19:39:53,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-23T19:39:53,960 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-23T19:39:53,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T19:39:53,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T19:39:53,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T19:39:53,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T19:39:53,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T19:39:53,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T19:39:53,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T19:39:53,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T19:39:53,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T19:39:53,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/nfs.dump.dir in system properties and HBase conf 2024-11-23T19:39:53,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/java.io.tmpdir in system properties and HBase conf 2024-11-23T19:39:53,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T19:39:53,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T19:39:53,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T19:39:53,974 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T19:39:54,425 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:39:54,429 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:39:54,431 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:39:54,431 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:39:54,431 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T19:39:54,431 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:39:54,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49428dc3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:39:54,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7043bc8e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:39:54,523 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b404f99{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/java.io.tmpdir/jetty-localhost-39433-hadoop-hdfs-3_4_1-tests_jar-_-any-13755372600698706101/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T19:39:54,523 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3698cd86{HTTP/1.1, (http/1.1)}{localhost:39433} 2024-11-23T19:39:54,524 INFO [Time-limited test {}] server.Server(415): Started @307054ms 2024-11-23T19:39:54,534 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-23T19:39:54,534 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T19:39:54,535 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T19:39:54,535 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-23T19:39:54,536 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-23T19:39:54,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-23T19:39:54,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:54,884 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:39:54,886 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:39:54,887 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:39:54,887 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:39:54,887 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T19:39:54,888 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7acdff1a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:39:54,888 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40d0e54c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:39:54,982 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5eefcd14{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/java.io.tmpdir/jetty-localhost-37651-hadoop-hdfs-3_4_1-tests_jar-_-any-5455809911691052655/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:39:54,983 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75728f34{HTTP/1.1, (http/1.1)}{localhost:37651} 2024-11-23T19:39:54,983 INFO [Time-limited test {}] server.Server(415): Started @307513ms 2024-11-23T19:39:54,984 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:39:55,008 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T19:39:55,011 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T19:39:55,011 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T19:39:55,011 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T19:39:55,011 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T19:39:55,011 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3829c39e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/hadoop.log.dir/,AVAILABLE} 2024-11-23T19:39:55,012 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72757a2f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T19:39:55,104 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2269c58e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/java.io.tmpdir/jetty-localhost-37135-hadoop-hdfs-3_4_1-tests_jar-_-any-11635785497888029105/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:39:55,104 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@477d322e{HTTP/1.1, (http/1.1)}{localhost:37135} 2024-11-23T19:39:55,104 INFO [Time-limited test {}] server.Server(415): Started @307635ms 2024-11-23T19:39:55,105 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T19:39:55,303 INFO [regionserver/387b213c044a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T19:39:55,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-23T19:39:55,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-23T19:39:55,644 DEBUG [master/387b213c044a:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=9, created chunk count=9, reused chunk count=74, reuseRatio=89.16% 2024-11-23T19:39:55,645 DEBUG [master/387b213c044a:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-23T19:39:56,597 WARN [Thread-2521 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/cluster_a29a1960-be95-eaae-2f85-5d739400da92/data/data1/current/BP-1547216778-172.17.0.3-1732390793976/current, will proceed with Du for space computation calculation, 2024-11-23T19:39:56,597 WARN [Thread-2522 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/cluster_a29a1960-be95-eaae-2f85-5d739400da92/data/data2/current/BP-1547216778-172.17.0.3-1732390793976/current, will proceed with Du for space computation calculation, 2024-11-23T19:39:56,615 WARN [Thread-2527 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/cluster_a29a1960-be95-eaae-2f85-5d739400da92/data/data3/current/BP-1547216778-172.17.0.3-1732390793976/current, will proceed with Du for space computation calculation, 2024-11-23T19:39:56,615 WARN [Thread-2528 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/cluster_a29a1960-be95-eaae-2f85-5d739400da92/data/data4/current/BP-1547216778-172.17.0.3-1732390793976/current, will proceed with Du for space computation calculation, 2024-11-23T19:39:56,622 WARN [Thread-2485 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T19:39:56,624 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f0ff3ca0621ddc3 with lease ID 0x43654e7fe3dad4cf: Processing first storage report for DS-a6439a74-44e7-4bb7-86d5-33c11bb4cf4a from datanode DatanodeRegistration(127.0.0.1:40673, datanodeUuid=e77be7a5-85ce-494a-8206-d05d4e78af97, infoPort=39161, infoSecurePort=0, ipcPort=41027, storageInfo=lv=-57;cid=testClusterID;nsid=409613724;c=1732390793976) 2024-11-23T19:39:56,624 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f0ff3ca0621ddc3 with lease ID 0x43654e7fe3dad4cf: from storage DS-a6439a74-44e7-4bb7-86d5-33c11bb4cf4a node DatanodeRegistration(127.0.0.1:40673, datanodeUuid=e77be7a5-85ce-494a-8206-d05d4e78af97, infoPort=39161, infoSecurePort=0, ipcPort=41027, storageInfo=lv=-57;cid=testClusterID;nsid=409613724;c=1732390793976), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:39:56,624 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f0ff3ca0621ddc3 with lease ID 0x43654e7fe3dad4cf: Processing first storage report for DS-1e1908f1-a0c6-4f81-927c-e131c2688c02 from datanode DatanodeRegistration(127.0.0.1:40673, datanodeUuid=e77be7a5-85ce-494a-8206-d05d4e78af97, infoPort=39161, infoSecurePort=0, ipcPort=41027, storageInfo=lv=-57;cid=testClusterID;nsid=409613724;c=1732390793976) 2024-11-23T19:39:56,624 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f0ff3ca0621ddc3 with lease ID 0x43654e7fe3dad4cf: from storage DS-1e1908f1-a0c6-4f81-927c-e131c2688c02 node DatanodeRegistration(127.0.0.1:40673, datanodeUuid=e77be7a5-85ce-494a-8206-d05d4e78af97, infoPort=39161, infoSecurePort=0, ipcPort=41027, storageInfo=lv=-57;cid=testClusterID;nsid=409613724;c=1732390793976), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:39:56,632 WARN [Thread-2508 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T19:39:56,634 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c074e6c4be27e66 with lease ID 0x43654e7fe3dad4d0: Processing first storage report for DS-0d2ed39a-64e5-45e2-ba51-f9bb9506064a from datanode DatanodeRegistration(127.0.0.1:38389, datanodeUuid=a3749530-6142-4c75-a53b-73afaa186a72, infoPort=43543, infoSecurePort=0, ipcPort=34225, storageInfo=lv=-57;cid=testClusterID;nsid=409613724;c=1732390793976) 2024-11-23T19:39:56,634 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c074e6c4be27e66 with lease ID 0x43654e7fe3dad4d0: from storage DS-0d2ed39a-64e5-45e2-ba51-f9bb9506064a node DatanodeRegistration(127.0.0.1:38389, datanodeUuid=a3749530-6142-4c75-a53b-73afaa186a72, infoPort=43543, infoSecurePort=0, ipcPort=34225, storageInfo=lv=-57;cid=testClusterID;nsid=409613724;c=1732390793976), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:39:56,634 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c074e6c4be27e66 with lease ID 0x43654e7fe3dad4d0: Processing first storage report for DS-dd7c3977-e0fc-49b8-b364-cf6c9de7ddae from datanode DatanodeRegistration(127.0.0.1:38389, datanodeUuid=a3749530-6142-4c75-a53b-73afaa186a72, infoPort=43543, infoSecurePort=0, ipcPort=34225, storageInfo=lv=-57;cid=testClusterID;nsid=409613724;c=1732390793976) 2024-11-23T19:39:56,634 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c074e6c4be27e66 with lease ID 0x43654e7fe3dad4d0: from storage DS-dd7c3977-e0fc-49b8-b364-cf6c9de7ddae node DatanodeRegistration(127.0.0.1:38389, datanodeUuid=a3749530-6142-4c75-a53b-73afaa186a72, infoPort=43543, infoSecurePort=0, ipcPort=34225, storageInfo=lv=-57;cid=testClusterID;nsid=409613724;c=1732390793976), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T19:39:56,638 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e 2024-11-23T19:39:56,640 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/cluster_a29a1960-be95-eaae-2f85-5d739400da92/zookeeper_0, clientPort=63067, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/cluster_a29a1960-be95-eaae-2f85-5d739400da92/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/cluster_a29a1960-be95-eaae-2f85-5d739400da92/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T19:39:56,641 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63067 2024-11-23T19:39:56,641 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:39:56,642 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-23T19:39:56,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-23T19:39:56,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-23T19:39:56,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38389 is added to blk_1073741825_1001 (size=7) 2024-11-23T19:39:56,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40673 is added to blk_1073741825_1001 (size=7) 2024-11-23T19:39:56,654 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3 with version=8 2024-11-23T19:39:56,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35281/user/jenkins/test-data/ac636f4d-2fae-1dc4-1d58-ac67c5e4217d/hbase-staging 2024-11-23T19:39:56,656 INFO [Time-limited test {}] client.ConnectionUtils(128): master/387b213c044a:0 server-side Connection retries=45 2024-11-23T19:39:56,656 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:39:56,656 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T19:39:56,656 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T19:39:56,656 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:39:56,656 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo
with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T19:39:56,656 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-23T19:39:56,656 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T19:39:56,657 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43797 2024-11-23T19:39:56,658 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43797 connecting to ZooKeeper ensemble=127.0.0.1:63067 2024-11-23T19:39:56,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:437970x0, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T19:39:56,757 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43797-0x1016935b90b0000 connected 2024-11-23T19:39:56,842 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:39:56,844 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:39:56,848 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:39:56,848 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3, hbase.cluster.distributed=false 2024-11-23T19:39:56,852 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T19:39:56,853 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43797 2024-11-23T19:39:56,853 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43797 2024-11-23T19:39:56,854 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43797 2024-11-23T19:39:56,855 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43797 2024-11-23T19:39:56,855 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43797 2024-11-23T19:39:56,873 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/387b213c044a:0 server-side Connection retries=45 2024-11-23T19:39:56,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:39:56,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T19:39:56,873 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T19:39:56,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T19:39:56,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T19:39:56,873 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T19:39:56,873 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T19:39:56,874 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40109 2024-11-23T19:39:56,875 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40109 connecting to ZooKeeper ensemble=127.0.0.1:63067 2024-11-23T19:39:56,875 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:39:56,877 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:39:56,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:401090x0, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T19:39:56,884 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:401090x0, quorum=127.0.0.1:63067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:39:56,884 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40109-0x1016935b90b0001 connected 2024-11-23T19:39:56,884 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T19:39:56,884 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T19:39:56,885 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T19:39:56,886 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T19:39:56,886 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40109 2024-11-23T19:39:56,886 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40109 2024-11-23T19:39:56,887 DEBUG 
[Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40109 2024-11-23T19:39:56,888 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40109 2024-11-23T19:39:56,888 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40109 2024-11-23T19:39:56,899 DEBUG [M:0;387b213c044a:43797 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;387b213c044a:43797 2024-11-23T19:39:56,899 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/387b213c044a,43797,1732390796655 2024-11-23T19:39:56,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:39:56,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:39:56,905 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/387b213c044a,43797,1732390796655 2024-11-23T19:39:56,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:56,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T19:39:56,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:56,915 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T19:39:56,915 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/387b213c044a,43797,1732390796655 from backup master directory 2024-11-23T19:39:56,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/387b213c044a,43797,1732390796655 2024-11-23T19:39:56,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:39:56,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T19:39:56,925 WARN [master/387b213c044a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T19:39:56,925 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=387b213c044a,43797,1732390796655 2024-11-23T19:39:56,929 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/hbase.id] with ID: 3fb5a507-28b1-4a84-a4ab-0a3aa0c0bb1c 2024-11-23T19:39:56,929 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/.tmp/hbase.id 2024-11-23T19:39:56,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38389 is added to blk_1073741826_1002 (size=42) 2024-11-23T19:39:56,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40673 is added to blk_1073741826_1002 (size=42) 2024-11-23T19:39:56,934 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/.tmp/hbase.id]:[hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/hbase.id] 2024-11-23T19:39:56,943 INFO [master/387b213c044a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:39:56,943 INFO [master/387b213c044a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-23T19:39:56,944 INFO [master/387b213c044a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-23T19:39:56,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:56,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:56,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40673 is added to blk_1073741827_1003 (size=196) 2024-11-23T19:39:56,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38389 is added to blk_1073741827_1003 (size=196) 2024-11-23T19:39:56,968 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T19:39:56,969 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-23T19:39:56,969 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:39:56,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38389 is added to blk_1073741828_1004 (size=1189) 2024-11-23T19:39:56,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40673 is added to blk_1073741828_1004 (size=1189) 2024-11-23T19:39:56,976 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store 2024-11-23T19:39:56,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40673 is added to blk_1073741829_1005 (size=34) 2024-11-23T19:39:56,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38389 is added to blk_1073741829_1005 (size=34) 2024-11-23T19:39:56,982 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:39:56,982 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T19:39:56,982 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:39:56,982 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:39:56,982 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T19:39:56,982 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:39:56,982 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-23T19:39:56,982 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732390796982Disabling compacts and flushes for region at 1732390796982Disabling writes for close at 1732390796982Writing region close event to WAL at 1732390796982Closed at 1732390796982 2024-11-23T19:39:56,983 WARN [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/.initializing 2024-11-23T19:39:56,983 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/WALs/387b213c044a,43797,1732390796655 2024-11-23T19:39:56,985 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C43797%2C1732390796655, suffix=, logDir=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/WALs/387b213c044a,43797,1732390796655, archiveDir=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/oldWALs, maxLogs=10 2024-11-23T19:39:56,985 INFO [master/387b213c044a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C43797%2C1732390796655.1732390796985 2024-11-23T19:39:56,995 INFO [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/WALs/387b213c044a,43797,1732390796655/387b213c044a%2C43797%2C1732390796655.1732390796985 2024-11-23T19:39:57,000 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39161:39161),(127.0.0.1/127.0.0.1:43543:43543)] 2024-11-23T19:39:57,001 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T19:39:57,001 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:39:57,001 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:57,001 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:57,002 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:57,003 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T19:39:57,003 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:57,003 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:39:57,004 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:57,004 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T19:39:57,004 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:57,005 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:39:57,005 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:57,006 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T19:39:57,006 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:57,006 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:39:57,006 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:57,007 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T19:39:57,007 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:57,007 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T19:39:57,007 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:57,008 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:57,008 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:57,009 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:57,009 DEBUG [master/387b213c044a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:57,009 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T19:39:57,010 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T19:39:57,012 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T19:39:57,012 INFO [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=851345, jitterRate=0.08254130184650421}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T19:39:57,012 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732390797001Initializing all the Stores at 1732390797002 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390797002Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390797002Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390797002Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390797002Cleaning up temporary data from old regions at 1732390797009 (+7 ms)Region opened successfully at 1732390797012 (+3 ms) 2024-11-23T19:39:57,012 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T19:39:57,014 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15568ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=387b213c044a/172.17.0.3:0 2024-11-23T19:39:57,015 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-23T19:39:57,015 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T19:39:57,015 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T19:39:57,016 INFO [master/387b213c044a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T19:39:57,016 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-23T19:39:57,016 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-23T19:39:57,016 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T19:39:57,018 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T19:39:57,019 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T19:39:57,030 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-23T19:39:57,031 INFO [master/387b213c044a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T19:39:57,032 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T19:39:57,041 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-23T19:39:57,041 INFO [master/387b213c044a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T19:39:57,043 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T19:39:57,052 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-23T19:39:57,053 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T19:39:57,062 DEBUG 
[master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T19:39:57,067 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T19:39:57,072 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T19:39:57,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T19:39:57,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T19:39:57,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:57,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:57,084 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=387b213c044a,43797,1732390796655, sessionid=0x1016935b90b0000, setting cluster-up flag (Was=false) 2024-11-23T19:39:57,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:57,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:57,136 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T19:39:57,137 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=387b213c044a,43797,1732390796655 2024-11-23T19:39:57,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:57,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:57,188 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T19:39:57,189 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=387b213c044a,43797,1732390796655 2024-11-23T19:39:57,191 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-23T19:39:57,192 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-23T19:39:57,192 INFO [master/387b213c044a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-23T19:39:57,192 INFO [master/387b213c044a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-23T19:39:57,192 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 387b213c044a,43797,1732390796655 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T19:39:57,193 INFO [RS:0;387b213c044a:40109 {}] regionserver.HRegionServer(746): ClusterId : 3fb5a507-28b1-4a84-a4ab-0a3aa0c0bb1c 2024-11-23T19:39:57,193 DEBUG [RS:0;387b213c044a:40109 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T19:39:57,194 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:39:57,194 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:39:57,194 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:39:57,194 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/387b213c044a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T19:39:57,194 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/387b213c044a:0, corePoolSize=10, maxPoolSize=10 2024-11-23T19:39:57,194 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:57,194 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_MERGE_OPERATIONS-master/387b213c044a:0, corePoolSize=2, maxPoolSize=2 2024-11-23T19:39:57,194 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:57,195 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732390827194 2024-11-23T19:39:57,195 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T19:39:57,195 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T19:39:57,195 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T19:39:57,195 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T19:39:57,195 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T19:39:57,195 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T19:39:57,195 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:57,195 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:39:57,195 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-23T19:39:57,195 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T19:39:57,195 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T19:39:57,195 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T19:39:57,196 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T19:39:57,196 INFO [master/387b213c044a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T19:39:57,196 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390797196,5,FailOnTimeoutGroup] 2024-11-23T19:39:57,196 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390797196,5,FailOnTimeoutGroup] 2024-11-23T19:39:57,196 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, 
period=600000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:57,196 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T19:39:57,196 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:57,196 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:57,196 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:57,196 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T19:39:57,199 DEBUG [RS:0;387b213c044a:40109 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T19:39:57,199 DEBUG [RS:0;387b213c044a:40109 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T19:39:57,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38389 is added to blk_1073741831_1007 (size=1321) 2024-11-23T19:39:57,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40673 is added to blk_1073741831_1007 (size=1321) 2024-11-23T19:39:57,202 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-23T19:39:57,202 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', 
STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3 2024-11-23T19:39:57,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38389 is added to blk_1073741832_1008 (size=32) 2024-11-23T19:39:57,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40673 is added to blk_1073741832_1008 (size=32) 2024-11-23T19:39:57,208 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:39:57,209 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T19:39:57,210 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T19:39:57,210 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
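The DEBUG lines above repeatedly resolve each column family's store file tracker to DefaultStoreFileTracker, following the 'hbase.store.file-tracker.impl' => 'DEFAULT' entry carried in the table descriptor METADATA. Purely as a minimal sketch, assuming the same key could also be supplied through a plain Hadoop Configuration (the log only shows the per-table METADATA form, and the class name below is hypothetical):

// Illustrative only: carries the 'hbase.store.file-tracker.impl' key seen in
// the table descriptor METADATA on a plain Hadoop Configuration object.
// Whether HBase honours a site-wide value versus the per-table entry shown
// in this log is an assumption of this sketch.
import org.apache.hadoop.conf.Configuration;

public class StoreFileTrackerConfigSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "DEFAULT" matches the value recorded in the descriptor above and,
        // per the DEBUG lines, resolves to DefaultStoreFileTracker.
        conf.set("hbase.store.file-tracker.impl", "DEFAULT");
        System.out.println(conf.get("hbase.store.file-tracker.impl"));
    }
}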
2024-11-23T19:39:57,210 DEBUG [RS:0;387b213c044a:40109 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T19:39:57,210 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:39:57,210 DEBUG [RS:0;387b213c044a:40109 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6290443f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=387b213c044a/172.17.0.3:0 2024-11-23T19:39:57,210 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T19:39:57,211 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T19:39:57,211 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:57,212 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:39:57,212 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T19:39:57,213 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T19:39:57,213 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:57,213 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:39:57,213 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T19:39:57,214 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T19:39:57,214 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:57,215 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:39:57,215 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T19:39:57,216 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/data/hbase/meta/1588230740 2024-11-23T19:39:57,216 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/data/hbase/meta/1588230740 2024-11-23T19:39:57,217 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T19:39:57,217 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T19:39:57,218 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
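The FlushLargeStoresPolicy line just above describes a fallback: when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, the per-family flush lower bound becomes the region's memstore flush size divided by the number of column families. That matches the 32.0 M reported earlier for master:store (134217728 bytes across its 4 families = 33554432), and the 16.0 M reported here for hbase:meta follows the same rule. A minimal sketch of that arithmetic; the class and method names below are invented for illustration:

// Sketch of the fallback described by the FlushLargeStoresPolicy message:
// lower bound = memstore flush size / number of column families.
public class FlushLowerBoundSketch {
    static long fallbackLowerBound(long memstoreFlushSize, int numFamilies) {
        return memstoreFlushSize / numFamilies;
    }

    public static void main(String[] args) {
        // Numbers taken from this log's master:store case: 128 MB across
        // 4 families (info, proc, rs, state) -> 33554432, i.e. the 32.0 M shown.
        System.out.println(fallbackLowerBound(134_217_728L, 4)); // 33554432
    }
}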
2024-11-23T19:39:57,219 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T19:39:57,221 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T19:39:57,221 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=718303, jitterRate=-0.08663149178028107}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T19:39:57,222 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732390797208Initializing all the Stores at 1732390797208Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390797208Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390797209 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390797209Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390797209Cleaning up temporary data from old regions at 1732390797217 (+8 ms)Region opened successfully at 1732390797222 (+5 ms) 2024-11-23T19:39:57,222 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T19:39:57,222 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T19:39:57,222 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T19:39:57,222 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T19:39:57,222 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T19:39:57,222 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T19:39:57,222 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732390797222Disabling compacts and flushes for region at 1732390797222Disabling writes for close at 1732390797222Writing region close 
event to WAL at 1732390797222Closed at 1732390797222 2024-11-23T19:39:57,223 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:39:57,223 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-23T19:39:57,223 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T19:39:57,224 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T19:39:57,225 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T19:39:57,228 DEBUG [RS:0;387b213c044a:40109 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;387b213c044a:40109 2024-11-23T19:39:57,228 INFO [RS:0;387b213c044a:40109 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T19:39:57,228 INFO [RS:0;387b213c044a:40109 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T19:39:57,228 DEBUG [RS:0;387b213c044a:40109 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-23T19:39:57,229 INFO [RS:0;387b213c044a:40109 {}] regionserver.HRegionServer(2659): reportForDuty to master=387b213c044a,43797,1732390796655 with port=40109, startcode=1732390796873 2024-11-23T19:39:57,229 DEBUG [RS:0;387b213c044a:40109 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T19:39:57,230 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36907, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T19:39:57,231 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43797 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 387b213c044a,40109,1732390796873 2024-11-23T19:39:57,231 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43797 {}] master.ServerManager(517): Registering regionserver=387b213c044a,40109,1732390796873 2024-11-23T19:39:57,232 DEBUG [RS:0;387b213c044a:40109 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3 2024-11-23T19:39:57,232 DEBUG [RS:0;387b213c044a:40109 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44327 2024-11-23T19:39:57,232 DEBUG [RS:0;387b213c044a:40109 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T19:39:57,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T19:39:57,241 DEBUG [RS:0;387b213c044a:40109 {}] 
zookeeper.ZKUtil(111): regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/387b213c044a,40109,1732390796873 2024-11-23T19:39:57,241 WARN [RS:0;387b213c044a:40109 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T19:39:57,241 INFO [RS:0;387b213c044a:40109 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:39:57,241 DEBUG [RS:0;387b213c044a:40109 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/WALs/387b213c044a,40109,1732390796873 2024-11-23T19:39:57,242 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [387b213c044a,40109,1732390796873] 2024-11-23T19:39:57,245 INFO [RS:0;387b213c044a:40109 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T19:39:57,246 INFO [RS:0;387b213c044a:40109 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T19:39:57,247 INFO [RS:0;387b213c044a:40109 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T19:39:57,247 INFO [RS:0;387b213c044a:40109 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:57,247 INFO [RS:0;387b213c044a:40109 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T19:39:57,248 INFO [RS:0;387b213c044a:40109 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T19:39:57,248 INFO [RS:0;387b213c044a:40109 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
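The ChoreService(168) entries in this stretch enable periodic chores such as CompactionThroughputTuner (period=60000 ms) and CompactedHFilesCleaner (period=120000 ms), and the CompactionChecker is reported to run every PT1S. As an illustration of that fixed-rate scheduling pattern only, not HBase's own ChoreService API, a plain java.util.concurrent equivalent might look like this (names below are made up):

// Illustrative stand-in for a ScheduledChore with a 1 s period; the task
// body is a placeholder, not real compaction-check logic.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
        pool.scheduleAtFixedRate(
            () -> System.out.println("chore tick"),
            0, 1000, TimeUnit.MILLISECONDS);
        Thread.sleep(3_500); // let a few ticks run, then stop
        pool.shutdownNow();
    }
}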
2024-11-23T19:39:57,248 DEBUG [RS:0;387b213c044a:40109 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:57,248 DEBUG [RS:0;387b213c044a:40109 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:57,248 DEBUG [RS:0;387b213c044a:40109 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:57,248 DEBUG [RS:0;387b213c044a:40109 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:57,248 DEBUG [RS:0;387b213c044a:40109 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:57,248 DEBUG [RS:0;387b213c044a:40109 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/387b213c044a:0, corePoolSize=2, maxPoolSize=2 2024-11-23T19:39:57,248 DEBUG [RS:0;387b213c044a:40109 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:57,248 DEBUG [RS:0;387b213c044a:40109 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:57,248 DEBUG [RS:0;387b213c044a:40109 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:57,248 DEBUG [RS:0;387b213c044a:40109 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:57,248 DEBUG [RS:0;387b213c044a:40109 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:57,249 DEBUG [RS:0;387b213c044a:40109 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/387b213c044a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T19:39:57,249 DEBUG [RS:0;387b213c044a:40109 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/387b213c044a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T19:39:57,249 DEBUG [RS:0;387b213c044a:40109 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/387b213c044a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T19:39:57,249 INFO [RS:0;387b213c044a:40109 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:57,249 INFO [RS:0;387b213c044a:40109 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:57,249 INFO [RS:0;387b213c044a:40109 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:57,249 INFO [RS:0;387b213c044a:40109 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
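The executor.ExecutorService(95) lines above start a set of small, purpose-named worker pools on the region server (for example RS_OPEN_REGION and RS_CLOSE_REGION with corePoolSize=1/maxPoolSize=1, RS_LOG_REPLAY_OPS with 2/2, RS_SNAPSHOT_OPERATIONS and RS_FLUSH_OPERATIONS with 3/3). The sketch below models that shape with plain java.util.concurrent only; it is not HBase's executor.ExecutorService class, and the names are invented for illustration:

// A single-threaded pool analogous in shape to RS_OPEN_REGION
// (corePoolSize=1, maxPoolSize=1); the submitted task is a placeholder.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class RegionOpenPoolSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService openRegionPool = Executors.newFixedThreadPool(1);
        openRegionPool.submit(() -> System.out.println("open-region task running"));
        openRegionPool.shutdown();
        openRegionPool.awaitTermination(5, TimeUnit.SECONDS);
    }
}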
2024-11-23T19:39:57,249 INFO [RS:0;387b213c044a:40109 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:57,249 INFO [RS:0;387b213c044a:40109 {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,40109,1732390796873-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T19:39:57,267 INFO [RS:0;387b213c044a:40109 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T19:39:57,267 INFO [RS:0;387b213c044a:40109 {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,40109,1732390796873-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:57,267 INFO [RS:0;387b213c044a:40109 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:57,267 INFO [RS:0;387b213c044a:40109 {}] regionserver.Replication(171): 387b213c044a,40109,1732390796873 started 2024-11-23T19:39:57,279 INFO [RS:0;387b213c044a:40109 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:57,279 INFO [RS:0;387b213c044a:40109 {}] regionserver.HRegionServer(1482): Serving as 387b213c044a,40109,1732390796873, RpcServer on 387b213c044a/172.17.0.3:40109, sessionid=0x1016935b90b0001 2024-11-23T19:39:57,279 DEBUG [RS:0;387b213c044a:40109 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T19:39:57,279 DEBUG [RS:0;387b213c044a:40109 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 387b213c044a,40109,1732390796873 2024-11-23T19:39:57,279 DEBUG [RS:0;387b213c044a:40109 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '387b213c044a,40109,1732390796873' 2024-11-23T19:39:57,279 DEBUG [RS:0;387b213c044a:40109 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T19:39:57,280 DEBUG [RS:0;387b213c044a:40109 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T19:39:57,280 DEBUG [RS:0;387b213c044a:40109 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T19:39:57,280 DEBUG [RS:0;387b213c044a:40109 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T19:39:57,280 DEBUG [RS:0;387b213c044a:40109 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 387b213c044a,40109,1732390796873 2024-11-23T19:39:57,280 DEBUG [RS:0;387b213c044a:40109 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '387b213c044a,40109,1732390796873' 2024-11-23T19:39:57,280 DEBUG [RS:0;387b213c044a:40109 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T19:39:57,280 DEBUG [RS:0;387b213c044a:40109 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T19:39:57,281 DEBUG [RS:0;387b213c044a:40109 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T19:39:57,281 INFO [RS:0;387b213c044a:40109 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T19:39:57,281 INFO [RS:0;387b213c044a:40109 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-23T19:39:57,375 WARN [387b213c044a:43797 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-23T19:39:57,385 INFO [RS:0;387b213c044a:40109 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C40109%2C1732390796873, suffix=, logDir=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/WALs/387b213c044a,40109,1732390796873, archiveDir=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/oldWALs, maxLogs=32 2024-11-23T19:39:57,386 INFO [RS:0;387b213c044a:40109 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C40109%2C1732390796873.1732390797386 2024-11-23T19:39:57,395 INFO [RS:0;387b213c044a:40109 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/WALs/387b213c044a,40109,1732390796873/387b213c044a%2C40109%2C1732390796873.1732390797386 2024-11-23T19:39:57,396 DEBUG [RS:0;387b213c044a:40109 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39161:39161),(127.0.0.1/127.0.0.1:43543:43543)] 2024-11-23T19:39:57,626 DEBUG [387b213c044a:43797 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-23T19:39:57,627 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=387b213c044a,40109,1732390796873 2024-11-23T19:39:57,630 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 387b213c044a,40109,1732390796873, state=OPENING 2024-11-23T19:39:57,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:57,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-23T19:39:57,715 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T19:39:57,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:57,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:57,726 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T19:39:57,726 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:39:57,726 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:39:57,726 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=387b213c044a,40109,1732390796873}] 2024-11-23T19:39:57,880 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T19:39:57,885 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58725, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T19:39:57,889 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T19:39:57,889 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:39:57,891 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=387b213c044a%2C40109%2C1732390796873.meta, suffix=.meta, logDir=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/WALs/387b213c044a,40109,1732390796873, archiveDir=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/oldWALs, maxLogs=32 2024-11-23T19:39:57,892 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 387b213c044a%2C40109%2C1732390796873.meta.1732390797892.meta 2024-11-23T19:39:57,896 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/WALs/387b213c044a,40109,1732390796873/387b213c044a%2C40109%2C1732390796873.meta.1732390797892.meta 2024-11-23T19:39:57,902 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43543:43543),(127.0.0.1/127.0.0.1:39161:39161)] 
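The two WARN stack traces above come from lease recovery on WAL files left behind by the earlier mini cluster: RecoverLeaseFSUtils reflectively invokes DFSClient.isFileClosed, and the call fails because that DFSClient has already been shut down ("Filesystem closed"). For reference, the public-API version of the same pattern looks roughly like the sketch below; it is illustrative only (class name, timeout and polling interval are invented here, this is not HBase's RecoverLeaseFSUtils) and assumes hadoop-hdfs-client on the classpath.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Ask the NameNode to recover the lease on a WAL file and poll until it reports the
  // file closed. These are the same DFS calls the reflection in the trace above reaches.
  public static boolean recoverLease(Path wal, Configuration conf, long timeoutMs)
      throws IOException, InterruptedException {
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to recover on a local or other non-HDFS filesystem
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    // recoverLease() returns true once the file is closed on the NameNode side. If the
    // underlying DFSClient has already been closed, these calls throw
    // IOException("Filesystem closed"), which is exactly the cause chain logged above.
    while (!dfs.recoverLease(wal)) {
      if (dfs.isFileClosed(wal) || System.currentTimeMillis() > deadline) {
        break;
      }
      Thread.sleep(1000L);
    }
    return dfs.isFileClosed(wal);
  }
}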
2024-11-23T19:39:57,904 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T19:39:57,905 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T19:39:57,905 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T19:39:57,905 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-23T19:39:57,905 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T19:39:57,905 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T19:39:57,905 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T19:39:57,905 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T19:39:57,906 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T19:39:57,907 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T19:39:57,907 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:57,907 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:39:57,907 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T19:39:57,908 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T19:39:57,908 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:57,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:39:57,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T19:39:57,909 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T19:39:57,909 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:57,909 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:39:57,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T19:39:57,910 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); 
ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T19:39:57,910 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T19:39:57,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T19:39:57,911 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T19:39:57,911 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/data/hbase/meta/1588230740 2024-11-23T19:39:57,912 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/data/hbase/meta/1588230740 2024-11-23T19:39:57,913 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T19:39:57,913 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T19:39:57,914 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-23T19:39:57,915 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T19:39:57,916 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=690831, jitterRate=-0.12156379222869873}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T19:39:57,916 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T19:39:57,916 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732390797905Writing region info on filesystem at 1732390797905Initializing all the Stores at 1732390797906 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390797906Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390797906Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732390797906Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732390797906Cleaning up temporary data from old regions at 1732390797913 (+7 ms)Running coprocessor post-open hooks at 1732390797916 (+3 ms)Region opened successfully at 1732390797916 2024-11-23T19:39:57,917 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732390797879 2024-11-23T19:39:57,919 DEBUG [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T19:39:57,919 INFO [RS_OPEN_META-regionserver/387b213c044a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T19:39:57,919 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=387b213c044a,40109,1732390796873 2024-11-23T19:39:57,920 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 387b213c044a,40109,1732390796873, state=OPEN 2024-11-23T19:39:57,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T19:39:57,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T19:39:57,987 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:39:57,987 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T19:39:57,987 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=387b213c044a,40109,1732390796873 2024-11-23T19:39:57,993 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T19:39:57,993 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=387b213c044a,40109,1732390796873 in 261 msec 2024-11-23T19:39:57,996 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T19:39:57,996 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 770 msec 2024-11-23T19:39:57,997 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T19:39:57,997 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-23T19:39:57,999 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T19:39:57,999 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=387b213c044a,40109,1732390796873, seqNum=-1] 2024-11-23T19:39:57,999 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T19:39:58,001 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42061, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T19:39:58,006 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 814 msec 2024-11-23T19:39:58,006 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732390798006, completionTime=-1 2024-11-23T19:39:58,006 INFO 
[master/387b213c044a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-23T19:39:58,007 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-23T19:39:58,008 INFO [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-23T19:39:58,008 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732390858008 2024-11-23T19:39:58,008 INFO [master/387b213c044a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732390918008 2024-11-23T19:39:58,008 INFO [master/387b213c044a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-23T19:39:58,009 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,43797,1732390796655-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:58,009 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,43797,1732390796655-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:58,009 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,43797,1732390796655-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:58,009 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-387b213c044a:43797, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:58,009 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:58,009 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:58,010 DEBUG [master/387b213c044a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T19:39:58,012 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.087sec 2024-11-23T19:39:58,012 INFO [master/387b213c044a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T19:39:58,012 INFO [master/387b213c044a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T19:39:58,012 INFO [master/387b213c044a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T19:39:58,012 INFO [master/387b213c044a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
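The "fetched meta region location" DEBUG lines above are the client side of the registry lookup that resolves where hbase:meta is hosted. A hypothetical stand-alone client can obtain the same information through the public Connection/RegionLocator API, as in the sketch below; the class name is invented and the configuration is assumed to point at this mini cluster (hbase-site.xml or equivalent on the classpath).

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create(); // assumes cluster settings on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // For hbase:meta the location comes from the connection registry, the same lookup
      // reported by client.ConnectionUtils in the log above.
      HRegionLocation meta = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println("hbase:meta is on " + meta.getServerName()
          + " (region " + meta.getRegion().getEncodedName() + ")");
    }
  }
}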
2024-11-23T19:39:58,012 INFO [master/387b213c044a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T19:39:58,012 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,43797,1732390796655-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T19:39:58,012 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,43797,1732390796655-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T19:39:58,015 DEBUG [master/387b213c044a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-23T19:39:58,015 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T19:39:58,015 INFO [master/387b213c044a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=387b213c044a,43797,1732390796655-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T19:39:58,095 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c6a0fbd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:39:58,095 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 387b213c044a,43797,-1 for getting cluster id 2024-11-23T19:39:58,095 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T19:39:58,098 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3fb5a507-28b1-4a84-a4ab-0a3aa0c0bb1c' 2024-11-23T19:39:58,099 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T19:39:58,099 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3fb5a507-28b1-4a84-a4ab-0a3aa0c0bb1c" 2024-11-23T19:39:58,099 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@569b041, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:39:58,099 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [387b213c044a,43797,-1] 2024-11-23T19:39:58,100 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T19:39:58,100 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:39:58,101 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57396, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T19:39:58,102 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d971f20, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T19:39:58,102 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T19:39:58,103 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=387b213c044a,40109,1732390796873, seqNum=-1] 2024-11-23T19:39:58,103 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T19:39:58,104 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33146, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T19:39:58,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=387b213c044a,43797,1732390796655 2024-11-23T19:39:58,106 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T19:39:58,109 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-23T19:39:58,109 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-23T19:39:58,112 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/WALs/test.com,8080,1, archiveDir=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/oldWALs, maxLogs=32 2024-11-23T19:39:58,112 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732390798112 2024-11-23T19:39:58,118 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732390798112 2024-11-23T19:39:58,119 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43543:43543),(127.0.0.1/127.0.0.1:39161:39161)] 2024-11-23T19:39:58,120 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732390798120 2024-11-23T19:39:58,128 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,128 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,128 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,128 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,128 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,128 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732390798112 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732390798120 2024-11-23T19:39:58,129 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39161:39161),(127.0.0.1/127.0.0.1:43543:43543)] 2024-11-23T19:39:58,129 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732390798112 is not closed yet, will try archiving it next time 2024-11-23T19:39:58,130 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,130 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,130 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40673 is added to blk_1073741835_1011 (size=93) 2024-11-23T19:39:58,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38389 is added to blk_1073741835_1011 (size=93) 2024-11-23T19:39:58,130 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,130 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,132 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732390798112 to hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/oldWALs/test.com%2C8080%2C1.1732390798112 2024-11-23T19:39:58,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38389 is added to blk_1073741836_1012 (size=93) 2024-11-23T19:39:58,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40673 is added to blk_1073741836_1012 (size=93) 2024-11-23T19:39:58,138 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/oldWALs 2024-11-23T19:39:58,138 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732390798120) 2024-11-23T19:39:58,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-23T19:39:58,138 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-23T19:39:58,138 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:39:58,139 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:39:58,139 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:39:58,139 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T19:39:58,139 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T19:39:58,139 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1113890573, stopped=false 2024-11-23T19:39:58,139 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=387b213c044a,43797,1732390796655 2024-11-23T19:39:58,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T19:39:58,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T19:39:58,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:58,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:58,157 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T19:39:58,157 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-23T19:39:58,157 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:39:58,157 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:39:58,158 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:39:58,158 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '387b213c044a,40109,1732390796873' ***** 2024-11-23T19:39:58,158 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T19:39:58,158 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T19:39:58,158 INFO [RS:0;387b213c044a:40109 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T19:39:58,158 INFO [RS:0;387b213c044a:40109 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T19:39:58,158 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T19:39:58,158 INFO [RS:0;387b213c044a:40109 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T19:39:58,158 INFO [RS:0;387b213c044a:40109 {}] regionserver.HRegionServer(959): stopping server 387b213c044a,40109,1732390796873 2024-11-23T19:39:58,158 INFO [RS:0;387b213c044a:40109 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T19:39:58,158 INFO [RS:0;387b213c044a:40109 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;387b213c044a:40109. 
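The deletion of the /hbase/running znode above is how the shutdown request fans out: the servers watch that znode, and ZKUtil immediately re-sets the watch even though the node is now gone. A minimal stand-alone ZooKeeper sketch of the same watch pattern follows; the quorum string is the one from the log, while everything else (class name, latch) is invented for illustration and is not HBase's ZKWatcher.

import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatchSketch {
  public static void main(String[] args) throws IOException, KeeperException, InterruptedException {
    CountDownLatch deleted = new CountDownLatch(1);
    Watcher watcher = (WatchedEvent event) -> {
      // Only react to the deletion of /hbase/running; connection-state events are ignored.
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        deleted.countDown();
      }
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:63067", 30_000, watcher);
    // exists() registers the watch whether or not the znode currently exists, which is the
    // "Set watcher on znode that does not yet exist" behaviour logged above.
    zk.exists("/hbase/running", true);
    deleted.await(); // returns once the cluster-shutdown marker znode is deleted
    zk.close();
  }
}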
2024-11-23T19:39:58,159 DEBUG [RS:0;387b213c044a:40109 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T19:39:58,159 DEBUG [RS:0;387b213c044a:40109 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:39:58,159 INFO [RS:0;387b213c044a:40109 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T19:39:58,159 INFO [RS:0;387b213c044a:40109 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T19:39:58,159 INFO [RS:0;387b213c044a:40109 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-23T19:39:58,159 INFO [RS:0;387b213c044a:40109 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-23T19:39:58,159 INFO [RS:0;387b213c044a:40109 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-23T19:39:58,159 DEBUG [RS:0;387b213c044a:40109 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-23T19:39:58,159 DEBUG [RS:0;387b213c044a:40109 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-23T19:39:58,159 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T19:39:58,160 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T19:39:58,160 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T19:39:58,160 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T19:39:58,160 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T19:39:58,160 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-23T19:39:58,177 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/data/hbase/meta/1588230740/.tmp/ns/983ed92d5f504ffc87fd6f5b878bd579 is 43, key is default/ns:d/1732390798001/Put/seqid=0 2024-11-23T19:39:58,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38389 is added to blk_1073741837_1013 (size=5153) 2024-11-23T19:39:58,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40673 is added to blk_1073741837_1013 (size=5153) 2024-11-23T19:39:58,182 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/data/hbase/meta/1588230740/.tmp/ns/983ed92d5f504ffc87fd6f5b878bd579 2024-11-23T19:39:58,186 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/data/hbase/meta/1588230740/.tmp/ns/983ed92d5f504ffc87fd6f5b878bd579 as hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/data/hbase/meta/1588230740/ns/983ed92d5f504ffc87fd6f5b878bd579 2024-11-23T19:39:58,190 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/data/hbase/meta/1588230740/ns/983ed92d5f504ffc87fd6f5b878bd579, entries=2, sequenceid=6, filesize=5.0 K 2024-11-23T19:39:58,191 INFO 
[RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 31ms, sequenceid=6, compaction requested=false 2024-11-23T19:39:58,194 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T19:39:58,195 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T19:39:58,195 INFO [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T19:39:58,195 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732390798159Running coprocessor pre-close hooks at 1732390798159Disabling compacts and flushes for region at 1732390798159Disabling writes for close at 1732390798160 (+1 ms)Obtaining lock to block concurrent updates at 1732390798160Preparing flush snapshotting stores in 1588230740 at 1732390798160Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732390798160Flushing stores of hbase:meta,,1.1588230740 at 1732390798161 (+1 ms)Flushing 1588230740/ns: creating writer at 1732390798161Flushing 1588230740/ns: appending metadata at 1732390798177 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732390798177Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4fcee419: reopening flushed file at 1732390798185 (+8 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 31ms, sequenceid=6, compaction requested=false at 1732390798191 (+6 ms)Writing region close event to WAL at 1732390798191Running coprocessor post-close hooks at 1732390798195 (+4 ms)Closed at 1732390798195 2024-11-23T19:39:58,195 DEBUG [RS_CLOSE_META-regionserver/387b213c044a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T19:39:58,251 INFO [regionserver/387b213c044a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-23T19:39:58,251 INFO [regionserver/387b213c044a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-23T19:39:58,360 INFO [RS:0;387b213c044a:40109 {}] regionserver.HRegionServer(976): stopping server 387b213c044a,40109,1732390796873; all regions closed. 
2024-11-23T19:39:58,361 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,361 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,362 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,362 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,363 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40673 is added to blk_1073741834_1010 (size=1152) 2024-11-23T19:39:58,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38389 is added to blk_1073741834_1010 (size=1152) 2024-11-23T19:39:58,372 DEBUG [RS:0;387b213c044a:40109 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/oldWALs 2024-11-23T19:39:58,372 INFO [RS:0;387b213c044a:40109 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 387b213c044a%2C40109%2C1732390796873.meta:.meta(num 1732390797892) 2024-11-23T19:39:58,373 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,373 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,373 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,373 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,373 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38389 is added to blk_1073741833_1009 (size=93) 2024-11-23T19:39:58,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40673 is added to blk_1073741833_1009 (size=93) 2024-11-23T19:39:58,378 DEBUG [RS:0;387b213c044a:40109 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/oldWALs 2024-11-23T19:39:58,378 INFO [RS:0;387b213c044a:40109 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 387b213c044a%2C40109%2C1732390796873:(num 1732390797386) 2024-11-23T19:39:58,378 DEBUG [RS:0;387b213c044a:40109 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T19:39:58,378 INFO [RS:0;387b213c044a:40109 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T19:39:58,378 INFO [RS:0;387b213c044a:40109 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T19:39:58,378 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:58,379 INFO [RS:0;387b213c044a:40109 {}] hbase.ChoreService(370): Chore service for: regionserver/387b213c044a:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T19:39:58,379 INFO [RS:0;387b213c044a:40109 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T19:39:58,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:58,379 INFO [regionserver/387b213c044a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T19:39:58,379 INFO [RS:0;387b213c044a:40109 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40109 2024-11-23T19:39:58,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:58,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:58,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:58,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:58,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:58,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:58,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:58,398 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:58,398 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:58,398 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:58,398 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:58,398 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:58,399 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:58,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:58,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:58,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:58,404 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-23T19:39:58,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T19:39:58,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/387b213c044a,40109,1732390796873 2024-11-23T19:39:58,409 INFO [RS:0;387b213c044a:40109 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T19:39:58,420 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [387b213c044a,40109,1732390796873] 2024-11-23T19:39:58,430 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/387b213c044a,40109,1732390796873 already deleted, retry=false 2024-11-23T19:39:58,430 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 387b213c044a,40109,1732390796873 expired; onlineServers=0 2024-11-23T19:39:58,430 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '387b213c044a,43797,1732390796655' ***** 2024-11-23T19:39:58,430 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T19:39:58,430 INFO [M:0;387b213c044a:43797 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T19:39:58,431 INFO [M:0;387b213c044a:43797 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T19:39:58,431 DEBUG [M:0;387b213c044a:43797 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T19:39:58,431 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-23T19:39:58,431 DEBUG [M:0;387b213c044a:43797 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T19:39:58,431 DEBUG [master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390797196 {}] cleaner.HFileCleaner(306): Exit Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.small.0-1732390797196,5,FailOnTimeoutGroup] 2024-11-23T19:39:58,431 DEBUG [master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390797196 {}] cleaner.HFileCleaner(306): Exit Thread[master/387b213c044a:0:becomeActiveMaster-HFileCleaner.large.0-1732390797196,5,FailOnTimeoutGroup] 2024-11-23T19:39:58,431 INFO [M:0;387b213c044a:43797 {}] hbase.ChoreService(370): Chore service for: master/387b213c044a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-23T19:39:58,431 INFO [M:0;387b213c044a:43797 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T19:39:58,431 DEBUG [M:0;387b213c044a:43797 {}] master.HMaster(1795): Stopping service threads 2024-11-23T19:39:58,431 INFO [M:0;387b213c044a:43797 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T19:39:58,431 INFO [M:0;387b213c044a:43797 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T19:39:58,431 INFO [M:0;387b213c044a:43797 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T19:39:58,431 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. 
terminating. 2024-11-23T19:39:58,441 DEBUG [M:0;387b213c044a:43797 {}] zookeeper.ZKUtil(347): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T19:39:58,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T19:39:58,441 WARN [M:0;387b213c044a:43797 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T19:39:58,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T19:39:58,442 INFO [M:0;387b213c044a:43797 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/.lastflushedseqids 2024-11-23T19:39:58,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38389 is added to blk_1073741838_1014 (size=99) 2024-11-23T19:39:58,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40673 is added to blk_1073741838_1014 (size=99) 2024-11-23T19:39:58,450 INFO [M:0;387b213c044a:43797 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-23T19:39:58,450 INFO [M:0;387b213c044a:43797 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T19:39:58,450 DEBUG [M:0;387b213c044a:43797 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T19:39:58,450 INFO [M:0;387b213c044a:43797 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:39:58,450 DEBUG [M:0;387b213c044a:43797 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:39:58,450 DEBUG [M:0;387b213c044a:43797 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T19:39:58,450 DEBUG [M:0;387b213c044a:43797 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-23T19:39:58,450 INFO [M:0;387b213c044a:43797 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-23T19:39:58,467 DEBUG [M:0;387b213c044a:43797 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7cf17ed67b304a32a3f3b69c7c070306 is 82, key is hbase:meta,,1/info:regioninfo/1732390797919/Put/seqid=0 2024-11-23T19:39:58,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38389 is added to blk_1073741839_1015 (size=5672) 2024-11-23T19:39:58,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40673 is added to blk_1073741839_1015 (size=5672) 2024-11-23T19:39:58,471 INFO [M:0;387b213c044a:43797 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7cf17ed67b304a32a3f3b69c7c070306 2024-11-23T19:39:58,488 DEBUG [M:0;387b213c044a:43797 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/56fa6a5e4291427f8367d081b4622f55 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732390798006/Put/seqid=0 2024-11-23T19:39:58,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38389 is added to blk_1073741840_1016 (size=5275) 2024-11-23T19:39:58,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40673 is added to blk_1073741840_1016 (size=5275) 2024-11-23T19:39:58,492 INFO [M:0;387b213c044a:43797 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/56fa6a5e4291427f8367d081b4622f55 2024-11-23T19:39:58,510 DEBUG [M:0;387b213c044a:43797 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bf2723c56c3c42f797fa54c61fbe18a7 is 69, key is 387b213c044a,40109,1732390796873/rs:state/1732390797231/Put/seqid=0 2024-11-23T19:39:58,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38389 is added to blk_1073741841_1017 (size=5156) 2024-11-23T19:39:58,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40673 is added to blk_1073741841_1017 (size=5156) 2024-11-23T19:39:58,515 INFO [M:0;387b213c044a:43797 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bf2723c56c3c42f797fa54c61fbe18a7 2024-11-23T19:39:58,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:39:58,520 INFO [RS:0;387b213c044a:40109 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T19:39:58,520 INFO [RS:0;387b213c044a:40109 {}] regionserver.HRegionServer(1031): Exiting; stopping=387b213c044a,40109,1732390796873; zookeeper connection closed. 2024-11-23T19:39:58,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40109-0x1016935b90b0001, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:39:58,520 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@dcbc3e1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@dcbc3e1 2024-11-23T19:39:58,520 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-23T19:39:58,537 DEBUG [M:0;387b213c044a:43797 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/922dc1af5b9a474885119946698e59b6 is 52, key is load_balancer_on/state:d/1732390798108/Put/seqid=0 2024-11-23T19:39:58,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38389 is added to blk_1073741842_1018 (size=5056) 2024-11-23T19:39:58,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40673 is added to blk_1073741842_1018 (size=5056) 2024-11-23T19:39:58,541 INFO [M:0;387b213c044a:43797 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/922dc1af5b9a474885119946698e59b6 2024-11-23T19:39:58,545 DEBUG [M:0;387b213c044a:43797 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7cf17ed67b304a32a3f3b69c7c070306 as hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7cf17ed67b304a32a3f3b69c7c070306 2024-11-23T19:39:58,549 INFO [M:0;387b213c044a:43797 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7cf17ed67b304a32a3f3b69c7c070306, entries=8, sequenceid=29, filesize=5.5 K 2024-11-23T19:39:58,550 DEBUG [M:0;387b213c044a:43797 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/56fa6a5e4291427f8367d081b4622f55 as hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/56fa6a5e4291427f8367d081b4622f55 2024-11-23T19:39:58,554 INFO [M:0;387b213c044a:43797 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/56fa6a5e4291427f8367d081b4622f55, entries=3, sequenceid=29, filesize=5.2 K 2024-11-23T19:39:58,555 DEBUG [M:0;387b213c044a:43797 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bf2723c56c3c42f797fa54c61fbe18a7 as hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bf2723c56c3c42f797fa54c61fbe18a7 2024-11-23T19:39:58,559 INFO [M:0;387b213c044a:43797 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bf2723c56c3c42f797fa54c61fbe18a7, entries=1, sequenceid=29, filesize=5.0 K 2024-11-23T19:39:58,560 DEBUG [M:0;387b213c044a:43797 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/922dc1af5b9a474885119946698e59b6 as hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/922dc1af5b9a474885119946698e59b6 2024-11-23T19:39:58,564 INFO [M:0;387b213c044a:43797 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44327/user/jenkins/test-data/3d487d82-15c4-661e-aa65-ec9a60e51bb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/922dc1af5b9a474885119946698e59b6, entries=1, sequenceid=29, filesize=4.9 K 2024-11-23T19:39:58,565 INFO [M:0;387b213c044a:43797 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=29, compaction requested=false 2024-11-23T19:39:58,567 INFO [M:0;387b213c044a:43797 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T19:39:58,567 DEBUG [M:0;387b213c044a:43797 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732390798450Disabling compacts and flushes for region at 1732390798450Disabling writes for close at 1732390798450Obtaining lock to block concurrent updates at 1732390798450Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732390798450Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732390798451 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732390798452 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732390798452Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732390798467 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732390798467Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732390798474 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732390798488 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732390798488Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732390798496 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732390798510 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732390798510Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732390798518 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732390798537 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732390798537Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@738fbaa7: reopening flushed file at 1732390798544 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@355aa742: reopening flushed file at 1732390798549 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@523f661a: reopening flushed file at 1732390798554 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7701f9a8: reopening flushed file at 1732390798559 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=29, compaction requested=false at 1732390798565 (+6 ms)Writing region close event to WAL at 1732390798567 (+2 ms)Closed at 1732390798567 2024-11-23T19:39:58,567 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,567 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,567 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,567 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,567 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-23T19:39:58,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38389 is added to blk_1073741830_1006 (size=10311) 2024-11-23T19:39:58,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40673 is added to blk_1073741830_1006 (size=10311) 2024-11-23T19:39:58,570 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T19:39:58,570 INFO [M:0;387b213c044a:43797 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-23T19:39:58,570 INFO [M:0;387b213c044a:43797 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43797 2024-11-23T19:39:58,570 INFO [M:0;387b213c044a:43797 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T19:39:58,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,44195,1732390602920/387b213c044a%2C44195%2C1732390602920.meta.1732390603946.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:58,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39041/user/jenkins/test-data/7ec7a203-cb7d-d40f-d1b0-c829fb9785ed/WALs/387b213c044a,37383,1732390604084/387b213c044a%2C37383%2C1732390604084.1732390604327 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-23T19:39:58,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:39:58,678 INFO [M:0;387b213c044a:43797 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T19:39:58,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43797-0x1016935b90b0000, quorum=127.0.0.1:63067, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T19:39:58,681 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2269c58e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:39:58,681 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@477d322e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:39:58,681 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:39:58,682 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72757a2f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:39:58,682 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3829c39e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/hadoop.log.dir/,STOPPED} 2024-11-23T19:39:58,684 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T19:39:58,684 WARN [BP-1547216778-172.17.0.3-1732390793976 heartbeating to localhost/127.0.0.1:44327 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:39:58,684 WARN [BP-1547216778-172.17.0.3-1732390793976 heartbeating to localhost/127.0.0.1:44327 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1547216778-172.17.0.3-1732390793976 (Datanode Uuid a3749530-6142-4c75-a53b-73afaa186a72) service to localhost/127.0.0.1:44327 2024-11-23T19:39:58,684 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:39:58,685 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/cluster_a29a1960-be95-eaae-2f85-5d739400da92/data/data3/current/BP-1547216778-172.17.0.3-1732390793976 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:39:58,685 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/cluster_a29a1960-be95-eaae-2f85-5d739400da92/data/data4/current/BP-1547216778-172.17.0.3-1732390793976 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:39:58,685 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:39:58,688 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5eefcd14{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T19:39:58,688 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75728f34{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:39:58,688 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:39:58,688 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40d0e54c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:39:58,689 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7acdff1a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/hadoop.log.dir/,STOPPED} 2024-11-23T19:39:58,690 WARN [BP-1547216778-172.17.0.3-1732390793976 heartbeating to localhost/127.0.0.1:44327 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T19:39:58,690 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T19:39:58,690 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T19:39:58,690 WARN [BP-1547216778-172.17.0.3-1732390793976 heartbeating to localhost/127.0.0.1:44327 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1547216778-172.17.0.3-1732390793976 (Datanode Uuid e77be7a5-85ce-494a-8206-d05d4e78af97) service to localhost/127.0.0.1:44327 2024-11-23T19:39:58,690 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/cluster_a29a1960-be95-eaae-2f85-5d739400da92/data/data1/current/BP-1547216778-172.17.0.3-1732390793976 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:39:58,691 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/cluster_a29a1960-be95-eaae-2f85-5d739400da92/data/data2/current/BP-1547216778-172.17.0.3-1732390793976 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T19:39:58,691 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T19:39:58,696 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b404f99{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T19:39:58,697 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3698cd86{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T19:39:58,697 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T19:39:58,697 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7043bc8e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T19:39:58,697 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49428dc3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ba9a070-7010-25f0-8d04-fb206976673e/hadoop.log.dir/,STOPPED} 2024-11-23T19:39:58,703 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-23T19:39:58,717 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-23T19:39:58,725 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=267 (was 231) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44327 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:44327 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44327 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44327 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44327 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44327 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44327 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native 
Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44327 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=537 (was 512) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=218 (was 211) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3106 (was 3115)